Commit | Line | Data |
---|---|---|
dc009d92 EB |
1 | /* |
2 | * kexec.c - kexec system call | |
3 | * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com> | |
4 | * | |
5 | * This source code is licensed under the GNU General Public License, | |
6 | * Version 2. See the file COPYING for more details. | |
7 | */ | |
8 | ||
cb105258 VG |
9 | #define pr_fmt(fmt) "kexec: " fmt |
10 | ||
c59ede7b | 11 | #include <linux/capability.h> |
dc009d92 EB |
12 | #include <linux/mm.h> |
13 | #include <linux/file.h> | |
14 | #include <linux/slab.h> | |
15 | #include <linux/fs.h> | |
16 | #include <linux/kexec.h> | |
8c5a1cf0 | 17 | #include <linux/mutex.h> |
dc009d92 EB |
18 | #include <linux/list.h> |
19 | #include <linux/highmem.h> | |
20 | #include <linux/syscalls.h> | |
21 | #include <linux/reboot.h> | |
dc009d92 | 22 | #include <linux/ioport.h> |
6e274d14 | 23 | #include <linux/hardirq.h> |
85916f81 MD |
24 | #include <linux/elf.h> |
25 | #include <linux/elfcore.h> | |
fd59d231 KO |
26 | #include <linux/utsname.h> |
27 | #include <linux/numa.h> | |
3ab83521 HY |
28 | #include <linux/suspend.h> |
29 | #include <linux/device.h> | |
89081d17 HY |
30 | #include <linux/freezer.h> |
31 | #include <linux/pm.h> | |
32 | #include <linux/cpu.h> | |
33 | #include <linux/console.h> | |
5f41b8cd | 34 | #include <linux/vmalloc.h> |
06a7f711 | 35 | #include <linux/swap.h> |
19234c08 | 36 | #include <linux/syscore_ops.h> |
52f5684c | 37 | #include <linux/compiler.h> |
8f1d26d0 | 38 | #include <linux/hugetlb.h> |
6e274d14 | 39 | |
dc009d92 EB |
40 | #include <asm/page.h> |
41 | #include <asm/uaccess.h> | |
42 | #include <asm/io.h> | |
fd59d231 | 43 | #include <asm/sections.h> |
dc009d92 | 44 | |
12db5562 VG |
45 | #include <crypto/hash.h> |
46 | #include <crypto/sha.h> | |
47 | ||
cc571658 | 48 | /* Per cpu memory for storing cpu states in case of system crash. */ |
43cf38eb | 49 | note_buf_t __percpu *crash_notes; |
cc571658 | 50 | |
fd59d231 | 51 | /* vmcoreinfo stuff */ |
edb79a21 | 52 | static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; |
fd59d231 | 53 | u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; |
d768281e KO |
54 | size_t vmcoreinfo_size; |
55 | size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); | |
fd59d231 | 56 | |
4fc9bbf9 KA |
57 | /* Flag to indicate we are going to kexec a new kernel */ |
58 | bool kexec_in_progress = false; | |
59 | ||
12db5562 VG |
60 | /* |
61 | * Declare these symbols weak so that if the architecture provides a purgatory, | |
62 | * these will be overridden. | |
63 | */ | |
64 | char __weak kexec_purgatory[0]; | |
65 | size_t __weak kexec_purgatory_size = 0; | |
66 | ||
74ca317c | 67 | #ifdef CONFIG_KEXEC_FILE |
12db5562 | 68 | static int kexec_calculate_store_digests(struct kimage *image); |
74ca317c | 69 | #endif |
12db5562 | 70 | |
dc009d92 EB |
71 | /* Location of the reserved area for the crash kernel */ |
72 | struct resource crashk_res = { | |
73 | .name = "Crash kernel", | |
74 | .start = 0, | |
75 | .end = 0, | |
76 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | |
77 | }; | |
0212f915 | 78 | struct resource crashk_low_res = { |
157752d8 | 79 | .name = "Crash kernel", |
0212f915 YL |
80 | .start = 0, |
81 | .end = 0, | |
82 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | |
83 | }; | |
dc009d92 | 84 | |
6e274d14 AN |
85 | int kexec_should_crash(struct task_struct *p) |
86 | { | |
b460cbc5 | 87 | if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops) |
6e274d14 AN |
88 | return 1; |
89 | return 0; | |
90 | } | |
91 | ||
dc009d92 EB |
92 | /* |
93 | * When kexec transitions to the new kernel there is a one-to-one | |
94 | * mapping between physical and virtual addresses. On processors | |
95 | * where you can disable the MMU this is trivial, and easy. For | |
96 | * others it is still a simple predictable page table to setup. | |
97 | * | |
98 | * In that environment kexec copies the new kernel to its final | |
99 | * resting place. This means I can only support memory whose | |
100 | * physical address can fit in an unsigned long. In particular | |
101 | * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled. | |
102 | * If the assembly stub has more restrictive requirements | |
103 | * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be | |
104 | * defined more restrictively in <asm/kexec.h>. | |
105 | * | |
106 | * The code for the transition from the current kernel to the | |
107 | * new kernel is placed in the control_code_buffer, whose size | |
163f6876 | 108 | * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single |
dc009d92 EB |
109 | * page of memory is necessary, but some architectures require more. | |
110 | * Because this memory must be identity mapped in the transition from | |
111 | * virtual to physical addresses it must live in the range | |
112 | * 0 - TASK_SIZE, as only the user space mappings are arbitrarily | |
113 | * modifiable. | |
114 | * | |
115 | * The assembly stub in the control code buffer is passed a linked list | |
116 | * of descriptor pages detailing the source pages of the new kernel, | |
117 | * and the destination addresses of those source pages. As this data | |
118 | * structure is not used in the context of the current OS, it must | |
119 | * be self-contained. | |
120 | * | |
121 | * The code has been made to work with highmem pages and will use a | |
122 | * destination page in its final resting place (if it happens | |
123 | * to allocate it). The end product of this is that most of the | |
124 | * physical address space, and most of RAM can be used. | |
125 | * | |
126 | * Future directions include: | |
127 | * - allocating a page table with the control code buffer identity | |
128 | * mapped, to simplify machine_kexec and make kexec_on_panic more | |
129 | * reliable. | |
130 | */ | |
131 | ||
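To make the descriptor-list format concrete, here is a minimal sketch of how the assembly stub consumes it. The IND_* flag values are as defined in include/linux/kexec.h; the walker itself (`walk_kimage_entries` is a hypothetical name) mirrors the `for_each_kimage_entry()` macro defined later in this file.

```c
/*
 * Each kimage_entry_t is a physical address with flag bits in the
 * low bits (values as defined in <linux/kexec.h>):
 *   IND_DESTINATION (0x1) - sets the current copy destination
 *   IND_INDIRECTION (0x2) - continue walking in this indirection page
 *   IND_DONE        (0x4) - end of the list
 *   IND_SOURCE      (0x8) - copy one page from here to the destination
 */
static void walk_kimage_entries(kimage_entry_t *ptr)
{
	unsigned long dest = 0;
	kimage_entry_t entry;

	while (!((entry = *ptr) & IND_DONE)) {
		if (entry & IND_DESTINATION) {
			dest = entry & PAGE_MASK;	/* new copy target */
			ptr++;
		} else if (entry & IND_INDIRECTION) {
			/* chain into the next page of entries */
			ptr = phys_to_virt(entry & PAGE_MASK);
		} else {	/* IND_SOURCE */
			/* the real stub copies PAGE_SIZE bytes to dest here */
			dest += PAGE_SIZE;
			ptr++;
		}
	}
}
```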
132 | /* | |
133 | * KIMAGE_NO_DEST is an impossible destination address..., for | |
134 | * allocating pages whose destination address we do not care about. | |
135 | */ | |
136 | #define KIMAGE_NO_DEST (-1UL) | |
137 | ||
72414d3f MS |
138 | static int kimage_is_destination_range(struct kimage *image, |
139 | unsigned long start, unsigned long end); | |
140 | static struct page *kimage_alloc_page(struct kimage *image, | |
9796fdd8 | 141 | gfp_t gfp_mask, |
72414d3f | 142 | unsigned long dest); |
dc009d92 | 143 | |
dabe7862 VG |
144 | static int copy_user_segment_list(struct kimage *image, |
145 | unsigned long nr_segments, | |
146 | struct kexec_segment __user *segments) | |
dc009d92 | 147 | { |
dabe7862 | 148 | int ret; |
dc009d92 | 149 | size_t segment_bytes; |
dc009d92 EB |
150 | |
151 | /* Read in the segments */ | |
152 | image->nr_segments = nr_segments; | |
153 | segment_bytes = nr_segments * sizeof(*segments); | |
dabe7862 VG |
154 | ret = copy_from_user(image->segment, segments, segment_bytes); |
155 | if (ret) | |
156 | ret = -EFAULT; | |
157 | ||
158 | return ret; | |
159 | } | |
160 | ||
161 | static int sanity_check_segment_list(struct kimage *image) | |
162 | { | |
163 | int result, i; | |
164 | unsigned long nr_segments = image->nr_segments; | |
dc009d92 EB |
165 | |
166 | /* | |
167 | * Verify we have good destination addresses. The caller is | |
168 | * responsible for making certain we don't attempt to load | |
169 | * the new image into invalid or reserved areas of RAM. This | |
170 | * just verifies it is an address we can use. | |
171 | * | |
172 | * Since the kernel does everything in page size chunks ensure | |
b595076a | 173 | * the destination addresses are page aligned. Too many |
dc009d92 EB |
174 | * special cases crop up when we don't do this. The most | |
175 | * insidious is getting overlapping destination addresses | |
176 | * simply because addresses are changed to page size | |
177 | * granularity. | |
178 | */ | |
179 | result = -EADDRNOTAVAIL; | |
180 | for (i = 0; i < nr_segments; i++) { | |
181 | unsigned long mstart, mend; | |
72414d3f | 182 | |
dc009d92 EB |
183 | mstart = image->segment[i].mem; |
184 | mend = mstart + image->segment[i].memsz; | |
185 | if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) | |
dabe7862 | 186 | return result; |
dc009d92 | 187 | if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT) |
dabe7862 | 188 | return result; |
dc009d92 EB |
189 | } |
190 | ||
191 | /* Verify our destination addresses do not overlap. | |
192 | * If we allowed overlapping destination addresses | |
193 | * through, very weird things can happen with no | |
194 | * easy explanation as one segment stops on another. | |
195 | */ | |
196 | result = -EINVAL; | |
72414d3f | 197 | for (i = 0; i < nr_segments; i++) { |
dc009d92 EB |
198 | unsigned long mstart, mend; |
199 | unsigned long j; | |
72414d3f | 200 | |
dc009d92 EB |
201 | mstart = image->segment[i].mem; |
202 | mend = mstart + image->segment[i].memsz; | |
72414d3f | 203 | for (j = 0; j < i; j++) { |
dc009d92 EB |
204 | unsigned long pstart, pend; |
205 | pstart = image->segment[j].mem; | |
206 | pend = pstart + image->segment[j].memsz; | |
207 | /* Do the segments overlap ? */ | |
208 | if ((mend > pstart) && (mstart < pend)) | |
dabe7862 | 209 | return result; |
dc009d92 EB |
210 | } |
211 | } | |
212 | ||
213 | /* Ensure our buffer sizes do not exceed | |
214 | * our memory sizes. This should always be the case, | |
215 | * and it is easier to check up front than to be surprised | |
216 | * later on. | |
217 | */ | |
218 | result = -EINVAL; | |
72414d3f | 219 | for (i = 0; i < nr_segments; i++) { |
dc009d92 | 220 | if (image->segment[i].bufsz > image->segment[i].memsz) |
dabe7862 | 221 | return result; |
dc009d92 EB |
222 | } |
223 | ||
dabe7862 VG |
224 | /* |
225 | * Verify we have good destination addresses. Normally | |
226 | * the caller is responsible for making certain we don't | |
227 | * attempt to load the new image into invalid or reserved | |
228 | * areas of RAM. But crash kernels are preloaded into a | |
229 | * reserved area of RAM. We must ensure the addresses | |
230 | * are in the reserved area, otherwise preloading the | |
231 | * kernel could corrupt things. | |
232 | */ | |
72414d3f | 233 | |
dabe7862 VG |
234 | if (image->type == KEXEC_TYPE_CRASH) { |
235 | result = -EADDRNOTAVAIL; | |
236 | for (i = 0; i < nr_segments; i++) { | |
237 | unsigned long mstart, mend; | |
238 | ||
239 | mstart = image->segment[i].mem; | |
240 | mend = mstart + image->segment[i].memsz - 1; | |
241 | /* Ensure we are within the crash kernel limits */ | |
242 | if ((mstart < crashk_res.start) || | |
243 | (mend > crashk_res.end)) | |
244 | return result; | |
245 | } | |
246 | } | |
dc009d92 | 247 | |
dabe7862 VG |
248 | return 0; |
249 | } | |
250 | ||
251 | static struct kimage *do_kimage_alloc_init(void) | |
252 | { | |
253 | struct kimage *image; | |
254 | ||
255 | /* Allocate a controlling structure */ | |
256 | image = kzalloc(sizeof(*image), GFP_KERNEL); | |
257 | if (!image) | |
258 | return NULL; | |
259 | ||
260 | image->head = 0; | |
261 | image->entry = &image->head; | |
262 | image->last_entry = &image->head; | |
263 | image->control_page = ~0; /* By default this does not apply */ | |
264 | image->type = KEXEC_TYPE_DEFAULT; | |
265 | ||
266 | /* Initialize the list of control pages */ | |
267 | INIT_LIST_HEAD(&image->control_pages); | |
268 | ||
269 | /* Initialize the list of destination pages */ | |
270 | INIT_LIST_HEAD(&image->dest_pages); | |
271 | ||
272 | /* Initialize the list of unusable pages */ | |
273 | INIT_LIST_HEAD(&image->unusable_pages); | |
274 | ||
275 | return image; | |
dc009d92 EB |
276 | } |
277 | ||
b92e7e0d ZY |
278 | static void kimage_free_page_list(struct list_head *list); |
279 | ||
255aedd9 VG |
280 | static int kimage_alloc_init(struct kimage **rimage, unsigned long entry, |
281 | unsigned long nr_segments, | |
282 | struct kexec_segment __user *segments, | |
283 | unsigned long flags) | |
dc009d92 | 284 | { |
255aedd9 | 285 | int ret; |
dc009d92 | 286 | struct kimage *image; |
255aedd9 VG |
287 | bool kexec_on_panic = flags & KEXEC_ON_CRASH; |
288 | ||
289 | if (kexec_on_panic) { | |
290 | /* Verify we have a valid entry point */ | |
291 | if ((entry < crashk_res.start) || (entry > crashk_res.end)) | |
292 | return -EADDRNOTAVAIL; | |
293 | } | |
dc009d92 EB |
294 | |
295 | /* Allocate and initialize a controlling structure */ | |
dabe7862 VG |
296 | image = do_kimage_alloc_init(); |
297 | if (!image) | |
298 | return -ENOMEM; | |
299 | ||
300 | image->start = entry; | |
301 | ||
255aedd9 VG |
302 | ret = copy_user_segment_list(image, nr_segments, segments); |
303 | if (ret) | |
dabe7862 VG |
304 | goto out_free_image; |
305 | ||
255aedd9 VG |
306 | ret = sanity_check_segment_list(image); |
307 | if (ret) | |
dabe7862 | 308 | goto out_free_image; |
72414d3f | 309 | |
255aedd9 VG |
310 | /* Enable the special crash kernel control page allocation policy. */ |
311 | if (kexec_on_panic) { | |
312 | image->control_page = crashk_res.start; | |
313 | image->type = KEXEC_TYPE_CRASH; | |
314 | } | |
315 | ||
dc009d92 EB |
316 | /* |
317 | * Find a location for the control code buffer, and add it to | |
318 | * the vector of segments so that its pages will also be | |
319 | * counted as destination pages. | |
320 | */ | |
255aedd9 | 321 | ret = -ENOMEM; |
dc009d92 | 322 | image->control_code_page = kimage_alloc_control_pages(image, |
163f6876 | 323 | get_order(KEXEC_CONTROL_PAGE_SIZE)); |
dc009d92 | 324 | if (!image->control_code_page) { |
e1bebcf4 | 325 | pr_err("Could not allocate control_code_buffer\n"); |
dabe7862 | 326 | goto out_free_image; |
dc009d92 EB |
327 | } |
328 | ||
255aedd9 VG |
329 | if (!kexec_on_panic) { |
330 | image->swap_page = kimage_alloc_control_pages(image, 0); | |
331 | if (!image->swap_page) { | |
332 | pr_err("Could not allocate swap buffer\n"); | |
333 | goto out_free_control_pages; | |
334 | } | |
3ab83521 HY |
335 | } |
336 | ||
b92e7e0d ZY |
337 | *rimage = image; |
338 | return 0; | |
dabe7862 | 339 | out_free_control_pages: |
b92e7e0d | 340 | kimage_free_page_list(&image->control_pages); |
dabe7862 | 341 | out_free_image: |
b92e7e0d | 342 | kfree(image); |
255aedd9 | 343 | return ret; |
dc009d92 EB |
344 | } |
345 | ||
74ca317c | 346 | #ifdef CONFIG_KEXEC_FILE |
cb105258 VG |
347 | static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len) |
348 | { | |
349 | struct fd f = fdget(fd); | |
350 | int ret; | |
351 | struct kstat stat; | |
352 | loff_t pos; | |
353 | ssize_t bytes = 0; | |
354 | ||
355 | if (!f.file) | |
356 | return -EBADF; | |
357 | ||
358 | ret = vfs_getattr(&f.file->f_path, &stat); | |
359 | if (ret) | |
360 | goto out; | |
361 | ||
362 | if (stat.size > INT_MAX) { | |
363 | ret = -EFBIG; | |
364 | goto out; | |
365 | } | |
366 | ||
367 | /* Don't hand 0 to vmalloc, it whines. */ | |
368 | if (stat.size == 0) { | |
369 | ret = -EINVAL; | |
370 | goto out; | |
371 | } | |
372 | ||
373 | *buf = vmalloc(stat.size); | |
374 | if (!*buf) { | |
375 | ret = -ENOMEM; | |
376 | goto out; | |
377 | } | |
378 | ||
379 | pos = 0; | |
380 | while (pos < stat.size) { | |
381 | bytes = kernel_read(f.file, pos, (char *)(*buf) + pos, | |
382 | stat.size - pos); | |
383 | if (bytes < 0) { | |
384 | vfree(*buf); | |
385 | ret = bytes; | |
386 | goto out; | |
387 | } | |
388 | ||
389 | if (bytes == 0) | |
390 | break; | |
391 | pos += bytes; | |
392 | } | |
393 | ||
394 | if (pos != stat.size) { | |
395 | ret = -EBADF; | |
396 | vfree(*buf); | |
397 | goto out; | |
398 | } | |
399 | ||
400 | *buf_len = pos; | |
401 | out: | |
402 | fdput(f); | |
403 | return ret; | |
404 | } | |
405 | ||
406 | /* Architectures can provide this probe function */ | |
407 | int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, | |
408 | unsigned long buf_len) | |
409 | { | |
410 | return -ENOEXEC; | |
411 | } | |
412 | ||
413 | void * __weak arch_kexec_kernel_image_load(struct kimage *image) | |
414 | { | |
415 | return ERR_PTR(-ENOEXEC); | |
416 | } | |
417 | ||
418 | void __weak arch_kimage_file_post_load_cleanup(struct kimage *image) | |
419 | { | |
420 | } | |
421 | ||
8e7d8381 VG |
422 | int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, |
423 | unsigned long buf_len) | |
424 | { | |
425 | return -EKEYREJECTED; | |
426 | } | |
427 | ||
12db5562 VG |
428 | /* Apply relocations of type RELA */ |
429 | int __weak | |
430 | arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, | |
431 | unsigned int relsec) | |
432 | { | |
433 | pr_err("RELA relocation unsupported.\n"); | |
434 | return -ENOEXEC; | |
435 | } | |
436 | ||
437 | /* Apply relocations of type REL */ | |
438 | int __weak | |
439 | arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, | |
440 | unsigned int relsec) | |
441 | { | |
442 | pr_err("REL relocation unsupported.\n"); | |
443 | return -ENOEXEC; | |
444 | } | |
445 | ||
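These weak stubs make purgatory loading fail cleanly on architectures that have not implemented relocation processing. The split between the two hooks is standard ELF: a RELA record carries an explicit addend, while a REL record stores the addend at the target location. A hedged sketch of what one arch-specific case might look like (an x86_64-style 64-bit absolute relocation is shown purely as an example; the real x86_64 override lives in arch/x86/kernel/machine_kexec_64.c):

```c
/* Illustrative only: applying a single 64-bit absolute relocation. */
static void apply_one_rela_sketch(unsigned long *location,
				  unsigned long symbol_value,
				  long addend)
{
	/* RELA: the addend comes from the relocation record itself */
	*location = symbol_value + addend;
}

static void apply_one_rel_sketch(unsigned long *location,
				 unsigned long symbol_value)
{
	/* REL: the addend is whatever was already stored at the target */
	*location += symbol_value;
}
```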
cb105258 | 446 | /* |
ad699349 | 447 | * Free up memory used by the kernel, initrd, and command line. These are |
cb105258 VG |
448 | * temporary allocations which are no longer needed once the buffers have | |
449 | * been loaded into separate segments and copied elsewhere. | |
450 | */ | |
451 | static void kimage_file_post_load_cleanup(struct kimage *image) | |
452 | { | |
12db5562 VG |
453 | struct purgatory_info *pi = &image->purgatory_info; |
454 | ||
cb105258 VG |
455 | vfree(image->kernel_buf); |
456 | image->kernel_buf = NULL; | |
457 | ||
458 | vfree(image->initrd_buf); | |
459 | image->initrd_buf = NULL; | |
460 | ||
461 | kfree(image->cmdline_buf); | |
462 | image->cmdline_buf = NULL; | |
463 | ||
12db5562 VG |
464 | vfree(pi->purgatory_buf); |
465 | pi->purgatory_buf = NULL; | |
466 | ||
467 | vfree(pi->sechdrs); | |
468 | pi->sechdrs = NULL; | |
469 | ||
cb105258 VG |
470 | /* See if the architecture has anything to clean up post load */ | |
471 | arch_kimage_file_post_load_cleanup(image); | |
27f48d3e VG |
472 | |
473 | /* | |
474 | * The above call should have called into the bootloader to free up | |
475 | * any data stored in kimage->image_loader_data, so it should | |
476 | * now be safe to free it. | |
477 | */ | |
478 | kfree(image->image_loader_data); | |
479 | image->image_loader_data = NULL; | |
cb105258 VG |
480 | } |
481 | ||
482 | /* | |
483 | * In file mode the list of segments is prepared by the kernel. Copy the | |
484 | * relevant data from user space, do error checking, and prepare the segment list. | |
485 | */ | |
486 | static int | |
487 | kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd, | |
488 | const char __user *cmdline_ptr, | |
489 | unsigned long cmdline_len, unsigned flags) | |
490 | { | |
491 | int ret = 0; | |
492 | void *ldata; | |
493 | ||
494 | ret = copy_file_from_fd(kernel_fd, &image->kernel_buf, | |
495 | &image->kernel_buf_len); | |
496 | if (ret) | |
497 | return ret; | |
498 | ||
499 | /* Call arch image probe handlers */ | |
500 | ret = arch_kexec_kernel_image_probe(image, image->kernel_buf, | |
501 | image->kernel_buf_len); | |
502 | ||
503 | if (ret) | |
504 | goto out; | |
505 | ||
8e7d8381 VG |
506 | #ifdef CONFIG_KEXEC_VERIFY_SIG |
507 | ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf, | |
508 | image->kernel_buf_len); | |
509 | if (ret) { | |
510 | pr_debug("kernel signature verification failed.\n"); | |
511 | goto out; | |
512 | } | |
513 | pr_debug("kernel signature verification successful.\n"); | |
514 | #endif | |
cb105258 VG |
515 | /* It is possible that no initramfs is being loaded */ | |
516 | if (!(flags & KEXEC_FILE_NO_INITRAMFS)) { | |
517 | ret = copy_file_from_fd(initrd_fd, &image->initrd_buf, | |
518 | &image->initrd_buf_len); | |
519 | if (ret) | |
520 | goto out; | |
521 | } | |
522 | ||
523 | if (cmdline_len) { | |
524 | image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL); | |
525 | if (!image->cmdline_buf) { | |
526 | ret = -ENOMEM; | |
527 | goto out; | |
528 | } | |
529 | ||
530 | ret = copy_from_user(image->cmdline_buf, cmdline_ptr, | |
531 | cmdline_len); | |
532 | if (ret) { | |
533 | ret = -EFAULT; | |
534 | goto out; | |
535 | } | |
536 | ||
537 | image->cmdline_buf_len = cmdline_len; | |
538 | ||
539 | /* the command line should be a NUL-terminated string */ | |
540 | if (image->cmdline_buf[cmdline_len - 1] != '\0') { | |
541 | ret = -EINVAL; | |
542 | goto out; | |
543 | } | |
544 | } | |
545 | ||
546 | /* Call arch image load handlers */ | |
547 | ldata = arch_kexec_kernel_image_load(image); | |
548 | ||
549 | if (IS_ERR(ldata)) { | |
550 | ret = PTR_ERR(ldata); | |
551 | goto out; | |
552 | } | |
553 | ||
554 | image->image_loader_data = ldata; | |
555 | out: | |
556 | /* In case of error, free up all allocated memory in this function */ | |
557 | if (ret) | |
558 | kimage_file_post_load_cleanup(image); | |
559 | return ret; | |
560 | } | |
561 | ||
562 | static int | |
563 | kimage_file_alloc_init(struct kimage **rimage, int kernel_fd, | |
564 | int initrd_fd, const char __user *cmdline_ptr, | |
565 | unsigned long cmdline_len, unsigned long flags) | |
566 | { | |
567 | int ret; | |
568 | struct kimage *image; | |
dd5f7260 | 569 | bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH; |
cb105258 VG |
570 | |
571 | image = do_kimage_alloc_init(); | |
572 | if (!image) | |
573 | return -ENOMEM; | |
574 | ||
575 | image->file_mode = 1; | |
576 | ||
dd5f7260 VG |
577 | if (kexec_on_panic) { |
578 | /* Enable special crash kernel control page alloc policy. */ | |
579 | image->control_page = crashk_res.start; | |
580 | image->type = KEXEC_TYPE_CRASH; | |
581 | } | |
582 | ||
cb105258 VG |
583 | ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd, |
584 | cmdline_ptr, cmdline_len, flags); | |
585 | if (ret) | |
586 | goto out_free_image; | |
587 | ||
588 | ret = sanity_check_segment_list(image); | |
589 | if (ret) | |
590 | goto out_free_post_load_bufs; | |
591 | ||
592 | ret = -ENOMEM; | |
593 | image->control_code_page = kimage_alloc_control_pages(image, | |
594 | get_order(KEXEC_CONTROL_PAGE_SIZE)); | |
595 | if (!image->control_code_page) { | |
596 | pr_err("Could not allocate control_code_buffer\n"); | |
597 | goto out_free_post_load_bufs; | |
598 | } | |
599 | ||
dd5f7260 VG |
600 | if (!kexec_on_panic) { |
601 | image->swap_page = kimage_alloc_control_pages(image, 0); | |
602 | if (!image->swap_page) { | |
d5393955 | 603 | pr_err("Could not allocate swap buffer\n"); |
dd5f7260 VG |
604 | goto out_free_control_pages; |
605 | } | |
cb105258 VG |
606 | } |
607 | ||
608 | *rimage = image; | |
609 | return 0; | |
610 | out_free_control_pages: | |
611 | kimage_free_page_list(&image->control_pages); | |
612 | out_free_post_load_bufs: | |
613 | kimage_file_post_load_cleanup(image); | |
cb105258 VG |
614 | out_free_image: |
615 | kfree(image); | |
616 | return ret; | |
617 | } | |
74ca317c VG |
618 | #else /* CONFIG_KEXEC_FILE */ |
619 | static inline void kimage_file_post_load_cleanup(struct kimage *image) { } | |
620 | #endif /* CONFIG_KEXEC_FILE */ | |
cb105258 | 621 | |
72414d3f MS |
622 | static int kimage_is_destination_range(struct kimage *image, |
623 | unsigned long start, | |
624 | unsigned long end) | |
dc009d92 EB |
625 | { |
626 | unsigned long i; | |
627 | ||
628 | for (i = 0; i < image->nr_segments; i++) { | |
629 | unsigned long mstart, mend; | |
72414d3f | 630 | |
dc009d92 | 631 | mstart = image->segment[i].mem; |
72414d3f MS |
632 | mend = mstart + image->segment[i].memsz; |
633 | if ((end > mstart) && (start < mend)) | |
dc009d92 | 634 | return 1; |
dc009d92 | 635 | } |
72414d3f | 636 | |
dc009d92 EB |
637 | return 0; |
638 | } | |
639 | ||
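The test above treats each segment as the half-open interval [mstart, mend) and reports an overlap exactly when the query range intersects it. A short worked example with made-up addresses:

```c
/*
 * Hypothetical example: one segment occupies [0x100000, 0x104000).
 *
 *   kimage_is_destination_range(image, 0x103000, 0x105000) -> 1
 *       because (0x105000 > 0x100000) && (0x103000 < 0x104000)
 *
 *   kimage_is_destination_range(image, 0x104000, 0x108000) -> 0
 *       the ranges merely touch; half-open intervals do not overlap
 */
```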
9796fdd8 | 640 | static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) |
dc009d92 EB |
641 | { |
642 | struct page *pages; | |
72414d3f | 643 | |
dc009d92 EB |
644 | pages = alloc_pages(gfp_mask, order); |
645 | if (pages) { | |
646 | unsigned int count, i; | |
647 | pages->mapping = NULL; | |
4c21e2f2 | 648 | set_page_private(pages, order); |
dc009d92 | 649 | count = 1 << order; |
72414d3f | 650 | for (i = 0; i < count; i++) |
dc009d92 | 651 | SetPageReserved(pages + i); |
dc009d92 | 652 | } |
72414d3f | 653 | |
dc009d92 EB |
654 | return pages; |
655 | } | |
656 | ||
657 | static void kimage_free_pages(struct page *page) | |
658 | { | |
659 | unsigned int order, count, i; | |
72414d3f | 660 | |
4c21e2f2 | 661 | order = page_private(page); |
dc009d92 | 662 | count = 1 << order; |
72414d3f | 663 | for (i = 0; i < count; i++) |
dc009d92 | 664 | ClearPageReserved(page + i); |
dc009d92 EB |
665 | __free_pages(page, order); |
666 | } | |
667 | ||
668 | static void kimage_free_page_list(struct list_head *list) | |
669 | { | |
670 | struct list_head *pos, *next; | |
72414d3f | 671 | |
dc009d92 EB |
672 | list_for_each_safe(pos, next, list) { |
673 | struct page *page; | |
674 | ||
675 | page = list_entry(pos, struct page, lru); | |
676 | list_del(&page->lru); | |
dc009d92 EB |
677 | kimage_free_pages(page); |
678 | } | |
679 | } | |
680 | ||
72414d3f MS |
681 | static struct page *kimage_alloc_normal_control_pages(struct kimage *image, |
682 | unsigned int order) | |
dc009d92 EB |
683 | { |
684 | /* Control pages are special, they are the intermediaries | |
685 | * that are needed while we copy the rest of the pages | |
686 | * to their final resting place. As such they must | |
687 | * not conflict with either the destination addresses | |
688 | * or memory the kernel is already using. | |
689 | * | |
690 | * The only case where we really need more than one of | |
691 | * these is for architectures where we cannot disable | |
692 | * the MMU and must instead generate an identity mapped | |
693 | * page table for all of the memory. | |
694 | * | |
695 | * At worst this runs in O(N) of the image size. | |
696 | */ | |
697 | struct list_head extra_pages; | |
698 | struct page *pages; | |
699 | unsigned int count; | |
700 | ||
701 | count = 1 << order; | |
702 | INIT_LIST_HEAD(&extra_pages); | |
703 | ||
704 | /* Loop while I can allocate a page and the page allocated | |
705 | * is a destination page. | |
706 | */ | |
707 | do { | |
708 | unsigned long pfn, epfn, addr, eaddr; | |
72414d3f | 709 | |
dc009d92 EB |
710 | pages = kimage_alloc_pages(GFP_KERNEL, order); |
711 | if (!pages) | |
712 | break; | |
713 | pfn = page_to_pfn(pages); | |
714 | epfn = pfn + count; | |
715 | addr = pfn << PAGE_SHIFT; | |
716 | eaddr = epfn << PAGE_SHIFT; | |
717 | if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || | |
72414d3f | 718 | kimage_is_destination_range(image, addr, eaddr)) { |
dc009d92 EB |
719 | list_add(&pages->lru, &extra_pages); |
720 | pages = NULL; | |
721 | } | |
72414d3f MS |
722 | } while (!pages); |
723 | ||
dc009d92 EB |
724 | if (pages) { |
725 | /* Remember the allocated page... */ | |
726 | list_add(&pages->lru, &image->control_pages); | |
727 | ||
728 | /* Because the page is already in its destination | |
729 | * location we will never allocate another page at | |
730 | * that address. Therefore kimage_alloc_pages | |
731 | * will not return it (again) and we don't need | |
732 | * to give it an entry in image->segment[]. | |
733 | */ | |
734 | } | |
735 | /* Deal with the destination pages I have inadvertently allocated. | |
736 | * | |
737 | * Ideally I would convert multi-page allocations into single | |
25985edc | 738 | * page allocations, and add everything to image->dest_pages. |
dc009d92 EB |
739 | * |
740 | * For now it is simpler to just free the pages. | |
741 | */ | |
742 | kimage_free_page_list(&extra_pages); | |
dc009d92 | 743 | |
72414d3f | 744 | return pages; |
dc009d92 EB |
745 | } |
746 | ||
72414d3f MS |
747 | static struct page *kimage_alloc_crash_control_pages(struct kimage *image, |
748 | unsigned int order) | |
dc009d92 EB |
749 | { |
750 | /* Control pages are special, they are the intermediaries | |
751 | * that are needed while we copy the rest of the pages | |
752 | * to their final resting place. As such they must | |
753 | * not conflict with either the destination addresses | |
754 | * or memory the kernel is already using. | |
755 | * | |
756 | * Control pages are also the only pages we must allocate | |
757 | * when loading a crash kernel. All of the other pages | |
758 | * are specified by the segments and we just memcpy | |
759 | * into them directly. | |
760 | * | |
761 | * The only case where we really need more than one of | |
762 | * these is for architectures where we cannot disable | |
763 | * the MMU and must instead generate an identity mapped | |
764 | * page table for all of the memory. | |
765 | * | |
766 | * Given the low demand this implements a very simple | |
767 | * allocator that finds the first hole of the appropriate | |
768 | * size in the reserved memory region, and allocates all | |
769 | * of the memory up to and including the hole. | |
770 | */ | |
771 | unsigned long hole_start, hole_end, size; | |
772 | struct page *pages; | |
72414d3f | 773 | |
dc009d92 EB |
774 | pages = NULL; |
775 | size = (1 << order) << PAGE_SHIFT; | |
776 | hole_start = (image->control_page + (size - 1)) & ~(size - 1); | |
777 | hole_end = hole_start + size - 1; | |
72414d3f | 778 | while (hole_end <= crashk_res.end) { |
dc009d92 | 779 | unsigned long i; |
72414d3f | 780 | |
3d214fae | 781 | if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT) |
dc009d92 | 782 | break; |
dc009d92 | 783 | /* See if I overlap any of the segments */ |
72414d3f | 784 | for (i = 0; i < image->nr_segments; i++) { |
dc009d92 | 785 | unsigned long mstart, mend; |
72414d3f | 786 | |
dc009d92 EB |
787 | mstart = image->segment[i].mem; |
788 | mend = mstart + image->segment[i].memsz - 1; | |
789 | if ((hole_end >= mstart) && (hole_start <= mend)) { | |
790 | /* Advance the hole to the end of the segment */ | |
791 | hole_start = (mend + (size - 1)) & ~(size - 1); | |
792 | hole_end = hole_start + size - 1; | |
793 | break; | |
794 | } | |
795 | } | |
796 | /* If I don't overlap any segments I have found my hole! */ | |
797 | if (i == image->nr_segments) { | |
798 | pages = pfn_to_page(hole_start >> PAGE_SHIFT); | |
799 | break; | |
800 | } | |
801 | } | |
72414d3f | 802 | if (pages) |
dc009d92 | 803 | image->control_page = hole_end; |
72414d3f | 804 | |
dc009d92 EB |
805 | return pages; |
806 | } | |
807 | ||
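The hole search above rounds hole_start up to a multiple of the allocation size, which is a power of two, so the mask trick is valid. A worked example with assumed values:

```c
/*
 * Assumed values: order = 2, so size = (1 << 2) << PAGE_SHIFT = 0x4000
 * (with 4K pages), and image->control_page = 0x1001234:
 *
 *   hole_start = (0x1001234 + 0x3fff) & ~0x3fff = 0x1004000
 *   hole_end   = 0x1004000 + 0x4000 - 1         = 0x1007fff
 *
 * Whenever the hole collides with a segment, hole_start is bumped
 * past that segment and re-aligned with the same mask.
 */
```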
808 | ||
72414d3f MS |
809 | struct page *kimage_alloc_control_pages(struct kimage *image, |
810 | unsigned int order) | |
dc009d92 EB |
811 | { |
812 | struct page *pages = NULL; | |
72414d3f MS |
813 | |
814 | switch (image->type) { | |
dc009d92 EB |
815 | case KEXEC_TYPE_DEFAULT: |
816 | pages = kimage_alloc_normal_control_pages(image, order); | |
817 | break; | |
818 | case KEXEC_TYPE_CRASH: | |
819 | pages = kimage_alloc_crash_control_pages(image, order); | |
820 | break; | |
821 | } | |
72414d3f | 822 | |
dc009d92 EB |
823 | return pages; |
824 | } | |
825 | ||
826 | static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) | |
827 | { | |
72414d3f | 828 | if (*image->entry != 0) |
dc009d92 | 829 | image->entry++; |
72414d3f | 830 | |
dc009d92 EB |
831 | if (image->entry == image->last_entry) { |
832 | kimage_entry_t *ind_page; | |
833 | struct page *page; | |
72414d3f | 834 | |
dc009d92 | 835 | page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); |
72414d3f | 836 | if (!page) |
dc009d92 | 837 | return -ENOMEM; |
72414d3f | 838 | |
dc009d92 EB |
839 | ind_page = page_address(page); |
840 | *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; | |
841 | image->entry = ind_page; | |
72414d3f MS |
842 | image->last_entry = ind_page + |
843 | ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); | |
dc009d92 EB |
844 | } |
845 | *image->entry = entry; | |
846 | image->entry++; | |
847 | *image->entry = 0; | |
72414d3f | 848 | |
dc009d92 EB |
849 | return 0; |
850 | } | |
851 | ||
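A schematic of the list kimage_add_entry() builds may help; this is the same structure the assembly stub walks at reboot time.

```c
/*
 * Shape of the entry list (schematic):
 *
 *   image->head -> PHYS(pageA) | IND_INDIRECTION
 *   pageA: DEST  | IND_DESTINATION,
 *          SRC0  | IND_SOURCE, SRC1 | IND_SOURCE, ...,
 *          PHYS(pageB) | IND_INDIRECTION   <- stored in the last slot
 *   pageB: ...more entries..., IND_DONE    <- from kimage_terminate()
 *
 * image->last_entry always points at the final slot of the current
 * indirection page, so there is guaranteed room for the link to the
 * next page.
 */
```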
72414d3f MS |
852 | static int kimage_set_destination(struct kimage *image, |
853 | unsigned long destination) | |
dc009d92 EB |
854 | { |
855 | int result; | |
856 | ||
857 | destination &= PAGE_MASK; | |
858 | result = kimage_add_entry(image, destination | IND_DESTINATION); | |
72414d3f | 859 | |
dc009d92 EB |
860 | return result; |
861 | } | |
862 | ||
863 | ||
864 | static int kimage_add_page(struct kimage *image, unsigned long page) | |
865 | { | |
866 | int result; | |
867 | ||
868 | page &= PAGE_MASK; | |
869 | result = kimage_add_entry(image, page | IND_SOURCE); | |
72414d3f | 870 | |
dc009d92 EB |
871 | return result; |
872 | } | |
873 | ||
874 | ||
875 | static void kimage_free_extra_pages(struct kimage *image) | |
876 | { | |
877 | /* Walk through and free any extra destination pages I may have */ | |
878 | kimage_free_page_list(&image->dest_pages); | |
879 | ||
25985edc | 880 | /* Walk through and free any unusable pages I have cached */ |
7d3e2bca | 881 | kimage_free_page_list(&image->unusable_pages); |
dc009d92 EB |
882 | |
883 | } | |
7fccf032 | 884 | static void kimage_terminate(struct kimage *image) |
dc009d92 | 885 | { |
72414d3f | 886 | if (*image->entry != 0) |
dc009d92 | 887 | image->entry++; |
72414d3f | 888 | |
dc009d92 | 889 | *image->entry = IND_DONE; |
dc009d92 EB |
890 | } |
891 | ||
892 | #define for_each_kimage_entry(image, ptr, entry) \ | |
893 | for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ | |
e1bebcf4 FF |
894 | ptr = (entry & IND_INDIRECTION) ? \ |
895 | phys_to_virt((entry & PAGE_MASK)) : ptr + 1) | |
dc009d92 EB |
896 | |
897 | static void kimage_free_entry(kimage_entry_t entry) | |
898 | { | |
899 | struct page *page; | |
900 | ||
901 | page = pfn_to_page(entry >> PAGE_SHIFT); | |
902 | kimage_free_pages(page); | |
903 | } | |
904 | ||
905 | static void kimage_free(struct kimage *image) | |
906 | { | |
907 | kimage_entry_t *ptr, entry; | |
908 | kimage_entry_t ind = 0; | |
909 | ||
910 | if (!image) | |
911 | return; | |
72414d3f | 912 | |
dc009d92 EB |
913 | kimage_free_extra_pages(image); |
914 | for_each_kimage_entry(image, ptr, entry) { | |
915 | if (entry & IND_INDIRECTION) { | |
916 | /* Free the previous indirection page */ | |
72414d3f | 917 | if (ind & IND_INDIRECTION) |
dc009d92 | 918 | kimage_free_entry(ind); |
dc009d92 EB |
919 | /* Save this indirection page until we are |
920 | * done with it. | |
921 | */ | |
922 | ind = entry; | |
e1bebcf4 | 923 | } else if (entry & IND_SOURCE) |
dc009d92 | 924 | kimage_free_entry(entry); |
dc009d92 EB |
925 | } |
926 | /* Free the final indirection page */ | |
72414d3f | 927 | if (ind & IND_INDIRECTION) |
dc009d92 | 928 | kimage_free_entry(ind); |
dc009d92 EB |
929 | |
930 | /* Handle any machine specific cleanup */ | |
931 | machine_kexec_cleanup(image); | |
932 | ||
933 | /* Free the kexec control pages... */ | |
934 | kimage_free_page_list(&image->control_pages); | |
cb105258 | 935 | |
cb105258 VG |
936 | /* |
937 | * Free up any temporary buffers allocated. This might hit if | |
938 | * an error occurred much later after buffer allocation. | |
939 | */ | |
940 | if (image->file_mode) | |
941 | kimage_file_post_load_cleanup(image); | |
942 | ||
dc009d92 EB |
943 | kfree(image); |
944 | } | |
945 | ||
72414d3f MS |
946 | static kimage_entry_t *kimage_dst_used(struct kimage *image, |
947 | unsigned long page) | |
dc009d92 EB |
948 | { |
949 | kimage_entry_t *ptr, entry; | |
950 | unsigned long destination = 0; | |
951 | ||
952 | for_each_kimage_entry(image, ptr, entry) { | |
72414d3f | 953 | if (entry & IND_DESTINATION) |
dc009d92 | 954 | destination = entry & PAGE_MASK; |
dc009d92 | 955 | else if (entry & IND_SOURCE) { |
72414d3f | 956 | if (page == destination) |
dc009d92 | 957 | return ptr; |
dc009d92 EB |
958 | destination += PAGE_SIZE; |
959 | } | |
960 | } | |
72414d3f | 961 | |
314b6a4d | 962 | return NULL; |
dc009d92 EB |
963 | } |
964 | ||
72414d3f | 965 | static struct page *kimage_alloc_page(struct kimage *image, |
9796fdd8 | 966 | gfp_t gfp_mask, |
72414d3f | 967 | unsigned long destination) |
dc009d92 EB |
968 | { |
969 | /* | |
970 | * Here we implement safeguards to ensure that a source page | |
971 | * is not copied to its destination page before the data on | |
972 | * the destination page is no longer useful. | |
973 | * | |
974 | * To do this we maintain the invariant that a source page is | |
975 | * either its own destination page, or it is not a | |
976 | * destination page at all. | |
977 | * | |
978 | * That is slightly stronger than required, but the proof | |
979 | * that no problems will occur is trivial, and the | |
980 | * implementation is simple to verify. | |
981 | * | |
982 | * When allocating all pages normally this algorithm will run | |
983 | * in O(N) time, but in the worst case it will run in O(N^2) | |
984 | * time. If the runtime is a problem the data structures can | |
985 | * be fixed. | |
986 | */ | |
987 | struct page *page; | |
988 | unsigned long addr; | |
989 | ||
990 | /* | |
991 | * Walk through the list of destination pages, and see if I | |
992 | * have a match. | |
993 | */ | |
994 | list_for_each_entry(page, &image->dest_pages, lru) { | |
995 | addr = page_to_pfn(page) << PAGE_SHIFT; | |
996 | if (addr == destination) { | |
997 | list_del(&page->lru); | |
998 | return page; | |
999 | } | |
1000 | } | |
1001 | page = NULL; | |
1002 | while (1) { | |
1003 | kimage_entry_t *old; | |
1004 | ||
1005 | /* Allocate a page, if we run out of memory give up */ | |
1006 | page = kimage_alloc_pages(gfp_mask, 0); | |
72414d3f | 1007 | if (!page) |
314b6a4d | 1008 | return NULL; |
dc009d92 | 1009 | /* If the page cannot be used, file it away */ |
72414d3f MS |
1010 | if (page_to_pfn(page) > |
1011 | (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { | |
7d3e2bca | 1012 | list_add(&page->lru, &image->unusable_pages); |
dc009d92 EB |
1013 | continue; |
1014 | } | |
1015 | addr = page_to_pfn(page) << PAGE_SHIFT; | |
1016 | ||
1017 | /* If it is the destination page we want, use it */ | |
1018 | if (addr == destination) | |
1019 | break; | |
1020 | ||
1021 | /* If the page is not a destination page use it */ | |
72414d3f MS |
1022 | if (!kimage_is_destination_range(image, addr, |
1023 | addr + PAGE_SIZE)) | |
dc009d92 EB |
1024 | break; |
1025 | ||
1026 | /* | |
1027 | * I know that the page is someone's destination page. | |
1028 | * See if there is already a source page for this | |
1029 | * destination page. And if so swap the source pages. | |
1030 | */ | |
1031 | old = kimage_dst_used(image, addr); | |
1032 | if (old) { | |
1033 | /* If so move it */ | |
1034 | unsigned long old_addr; | |
1035 | struct page *old_page; | |
1036 | ||
1037 | old_addr = *old & PAGE_MASK; | |
1038 | old_page = pfn_to_page(old_addr >> PAGE_SHIFT); | |
1039 | copy_highpage(page, old_page); | |
1040 | *old = addr | (*old & ~PAGE_MASK); | |
1041 | ||
1042 | /* The old page I have found cannot be a | |
f9092f35 JS |
1043 | * destination page, so return it if its | |
1044 | * gfp_flags honor the ones passed in. | |
dc009d92 | 1045 | */ |
f9092f35 JS |
1046 | if (!(gfp_mask & __GFP_HIGHMEM) && |
1047 | PageHighMem(old_page)) { | |
1048 | kimage_free_pages(old_page); | |
1049 | continue; | |
1050 | } | |
dc009d92 EB |
1051 | addr = old_addr; |
1052 | page = old_page; | |
1053 | break; | |
e1bebcf4 | 1054 | } else { |
dc009d92 EB |
1055 | /* Place the page on the destination list; I | |
1056 | * will use it later. | |
1057 | */ | |
1058 | list_add(&page->lru, &image->dest_pages); | |
1059 | } | |
1060 | } | |
72414d3f | 1061 | |
dc009d92 EB |
1062 | return page; |
1063 | } | |
1064 | ||
1065 | static int kimage_load_normal_segment(struct kimage *image, | |
72414d3f | 1066 | struct kexec_segment *segment) |
dc009d92 EB |
1067 | { |
1068 | unsigned long maddr; | |
310faaa9 | 1069 | size_t ubytes, mbytes; |
dc009d92 | 1070 | int result; |
cb105258 VG |
1071 | unsigned char __user *buf = NULL; |
1072 | unsigned char *kbuf = NULL; | |
dc009d92 EB |
1073 | |
1074 | result = 0; | |
cb105258 VG |
1075 | if (image->file_mode) |
1076 | kbuf = segment->kbuf; | |
1077 | else | |
1078 | buf = segment->buf; | |
dc009d92 EB |
1079 | ubytes = segment->bufsz; |
1080 | mbytes = segment->memsz; | |
1081 | maddr = segment->mem; | |
1082 | ||
1083 | result = kimage_set_destination(image, maddr); | |
72414d3f | 1084 | if (result < 0) |
dc009d92 | 1085 | goto out; |
72414d3f MS |
1086 | |
1087 | while (mbytes) { | |
dc009d92 EB |
1088 | struct page *page; |
1089 | char *ptr; | |
1090 | size_t uchunk, mchunk; | |
72414d3f | 1091 | |
dc009d92 | 1092 | page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); |
c80544dc | 1093 | if (!page) { |
dc009d92 EB |
1094 | result = -ENOMEM; |
1095 | goto out; | |
1096 | } | |
72414d3f MS |
1097 | result = kimage_add_page(image, page_to_pfn(page) |
1098 | << PAGE_SHIFT); | |
1099 | if (result < 0) | |
dc009d92 | 1100 | goto out; |
72414d3f | 1101 | |
dc009d92 EB |
1102 | ptr = kmap(page); |
1103 | /* Start with a clear page */ | |
3ecb01df | 1104 | clear_page(ptr); |
dc009d92 | 1105 | ptr += maddr & ~PAGE_MASK; |
31c3a3fe ZY |
1106 | mchunk = min_t(size_t, mbytes, |
1107 | PAGE_SIZE - (maddr & ~PAGE_MASK)); | |
1108 | uchunk = min(ubytes, mchunk); | |
72414d3f | 1109 | |
cb105258 VG |
1110 | /* For file based kexec, source pages are in kernel memory */ |
1111 | if (image->file_mode) | |
1112 | memcpy(ptr, kbuf, uchunk); | |
1113 | else | |
1114 | result = copy_from_user(ptr, buf, uchunk); | |
dc009d92 EB |
1115 | kunmap(page); |
1116 | if (result) { | |
f65a03f6 | 1117 | result = -EFAULT; |
dc009d92 EB |
1118 | goto out; |
1119 | } | |
1120 | ubytes -= uchunk; | |
1121 | maddr += mchunk; | |
cb105258 VG |
1122 | if (image->file_mode) |
1123 | kbuf += mchunk; | |
1124 | else | |
1125 | buf += mchunk; | |
dc009d92 EB |
1126 | mbytes -= mchunk; |
1127 | } | |
72414d3f | 1128 | out: |
dc009d92 EB |
1129 | return result; |
1130 | } | |
1131 | ||
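The loop above copies min(ubytes, mchunk) user bytes into each destination page and relies on clear_page() to zero-fill the memsz > bufsz tail. A worked example with assumed sizes:

```c
/*
 * Assumed sizes: bufsz = 5000, memsz = 12288 (three 4K pages),
 * segment->mem page aligned. The loop then iterates:
 *
 *   page 0: mchunk = 4096, uchunk = 4096 -> 4096 bytes copied
 *   page 1: mchunk = 4096, uchunk = 904  -> 904 bytes copied, rest zero
 *   page 2: mchunk = 4096, uchunk = 0    -> nothing copied, page all zero
 *
 * clear_page() at the top of each iteration provides the zero fill.
 */
```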
1132 | static int kimage_load_crash_segment(struct kimage *image, | |
72414d3f | 1133 | struct kexec_segment *segment) |
dc009d92 EB |
1134 | { |
1135 | /* For crash dump kernels we simply copy the data from | |
1136 | * user space to its destination. | |
1137 | * We do things a page at a time for the sake of kmap. | |
1138 | */ | |
1139 | unsigned long maddr; | |
310faaa9 | 1140 | size_t ubytes, mbytes; |
dc009d92 | 1141 | int result; |
dd5f7260 VG |
1142 | unsigned char __user *buf = NULL; |
1143 | unsigned char *kbuf = NULL; | |
dc009d92 EB |
1144 | |
1145 | result = 0; | |
dd5f7260 VG |
1146 | if (image->file_mode) |
1147 | kbuf = segment->kbuf; | |
1148 | else | |
1149 | buf = segment->buf; | |
dc009d92 EB |
1150 | ubytes = segment->bufsz; |
1151 | mbytes = segment->memsz; | |
1152 | maddr = segment->mem; | |
72414d3f | 1153 | while (mbytes) { |
dc009d92 EB |
1154 | struct page *page; |
1155 | char *ptr; | |
1156 | size_t uchunk, mchunk; | |
72414d3f | 1157 | |
dc009d92 | 1158 | page = pfn_to_page(maddr >> PAGE_SHIFT); |
c80544dc | 1159 | if (!page) { |
dc009d92 EB |
1160 | result = -ENOMEM; |
1161 | goto out; | |
1162 | } | |
1163 | ptr = kmap(page); | |
1164 | ptr += maddr & ~PAGE_MASK; | |
31c3a3fe ZY |
1165 | mchunk = min_t(size_t, mbytes, |
1166 | PAGE_SIZE - (maddr & ~PAGE_MASK)); | |
1167 | uchunk = min(ubytes, mchunk); | |
1168 | if (mchunk > uchunk) { | |
dc009d92 EB |
1169 | /* Zero the trailing part of the page */ |
1170 | memset(ptr + uchunk, 0, mchunk - uchunk); | |
1171 | } | |
dd5f7260 VG |
1172 | |
1173 | /* For file based kexec, source pages are in kernel memory */ | |
1174 | if (image->file_mode) | |
1175 | memcpy(ptr, kbuf, uchunk); | |
1176 | else | |
1177 | result = copy_from_user(ptr, buf, uchunk); | |
a7956113 | 1178 | kexec_flush_icache_page(page); |
dc009d92 EB |
1179 | kunmap(page); |
1180 | if (result) { | |
f65a03f6 | 1181 | result = -EFAULT; |
dc009d92 EB |
1182 | goto out; |
1183 | } | |
1184 | ubytes -= uchunk; | |
1185 | maddr += mchunk; | |
dd5f7260 VG |
1186 | if (image->file_mode) |
1187 | kbuf += mchunk; | |
1188 | else | |
1189 | buf += mchunk; | |
dc009d92 EB |
1190 | mbytes -= mchunk; |
1191 | } | |
72414d3f | 1192 | out: |
dc009d92 EB |
1193 | return result; |
1194 | } | |
1195 | ||
1196 | static int kimage_load_segment(struct kimage *image, | |
72414d3f | 1197 | struct kexec_segment *segment) |
dc009d92 EB |
1198 | { |
1199 | int result = -ENOMEM; | |
72414d3f MS |
1200 | |
1201 | switch (image->type) { | |
dc009d92 EB |
1202 | case KEXEC_TYPE_DEFAULT: |
1203 | result = kimage_load_normal_segment(image, segment); | |
1204 | break; | |
1205 | case KEXEC_TYPE_CRASH: | |
1206 | result = kimage_load_crash_segment(image, segment); | |
1207 | break; | |
1208 | } | |
72414d3f | 1209 | |
dc009d92 EB |
1210 | return result; |
1211 | } | |
1212 | ||
1213 | /* | |
1214 | * Exec Kernel system call: for obvious reasons only root may call it. | |
1215 | * | |
1216 | * This call breaks up into three pieces. | |
1217 | * - A generic part which loads the new kernel from the current | |
1218 | * address space, and very carefully places the data in the | |
1219 | * allocated pages. | |
1220 | * | |
1221 | * - A generic part that interacts with the kernel and tells all of | |
1222 | * the devices to shut down, preventing on-going DMAs and placing | |
1223 | * the devices in a consistent state so a later kernel can | |
1224 | * reinitialize them. | |
1225 | * | |
1226 | * - A machine specific part that includes the syscall number | |
002ace78 | 1227 | * and then copies the image to its final destination, and |
dc009d92 EB |
1228 | * jumps into the image at entry. | |
1229 | * | |
1230 | * kexec does not sync, or unmount filesystems so if you need | |
1231 | * that to happen you need to do that yourself. | |
1232 | */ | |
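For reference, a minimal userspace caller might look like the sketch below; in practice loads go through kexec-tools, and the flat-binary assumption, load address, and entry point here are purely illustrative.

```c
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kexec.h>

/* Hypothetical: stage a flat binary at physical 0x100000 and use its
 * start as the entry point. Requires CAP_SYS_BOOT. */
static int load_example(void *blob, unsigned long len)
{
	struct kexec_segment seg = {
		.buf   = blob,                   /* source in our address space */
		.bufsz = len,
		.mem   = (void *)0x100000,       /* assumed load address */
		.memsz = (len + 4095) & ~4095UL, /* page-rounded, >= bufsz */
	};

	return syscall(SYS_kexec_load, 0x100000UL, 1UL, &seg,
		       KEXEC_ARCH_DEFAULT);
}
```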
c330dda9 JM |
1233 | struct kimage *kexec_image; |
1234 | struct kimage *kexec_crash_image; | |
7984754b | 1235 | int kexec_load_disabled; |
8c5a1cf0 AM |
1236 | |
1237 | static DEFINE_MUTEX(kexec_mutex); | |
dc009d92 | 1238 | |
754fe8d2 HC |
1239 | SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, |
1240 | struct kexec_segment __user *, segments, unsigned long, flags) | |
dc009d92 EB |
1241 | { |
1242 | struct kimage **dest_image, *image; | |
dc009d92 EB |
1243 | int result; |
1244 | ||
1245 | /* We only trust the superuser with rebooting the system. */ | |
7984754b | 1246 | if (!capable(CAP_SYS_BOOT) || kexec_load_disabled) |
dc009d92 EB |
1247 | return -EPERM; |
1248 | ||
1249 | /* | |
1250 | * Verify we have a legal set of flags | |
1251 | * This leaves us room for future extensions. | |
1252 | */ | |
1253 | if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK)) | |
1254 | return -EINVAL; | |
1255 | ||
1256 | /* Verify we are on the appropriate architecture */ | |
1257 | if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && | |
1258 | ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) | |
dc009d92 | 1259 | return -EINVAL; |
dc009d92 EB |
1260 | |
1261 | /* Put an artificial cap on the number | |
1262 | * of segments passed to kexec_load. | |
1263 | */ | |
1264 | if (nr_segments > KEXEC_SEGMENT_MAX) | |
1265 | return -EINVAL; | |
1266 | ||
1267 | image = NULL; | |
1268 | result = 0; | |
1269 | ||
1270 | /* Because we write directly to the reserved memory | |
1271 | * region when loading crash kernels we need a mutex here to | |
1272 | * prevent multiple crash kernels from attempting to load | |
1273 | * simultaneously, and to prevent a crash kernel from loading | |
1274 | * over the top of an in-use crash kernel. | |
1275 | * | |
1276 | * KISS: always take the mutex. | |
1277 | */ | |
8c5a1cf0 | 1278 | if (!mutex_trylock(&kexec_mutex)) |
dc009d92 | 1279 | return -EBUSY; |
72414d3f | 1280 | |
dc009d92 | 1281 | dest_image = &kexec_image; |
72414d3f | 1282 | if (flags & KEXEC_ON_CRASH) |
dc009d92 | 1283 | dest_image = &kexec_crash_image; |
dc009d92 EB |
1284 | if (nr_segments > 0) { |
1285 | unsigned long i; | |
72414d3f | 1286 | |
518a0c71 GL |
1287 | if (flags & KEXEC_ON_CRASH) { |
1288 | /* | |
1289 | * Loading another kernel to switch to if this one | |
1290 | * crashes. Free any current crash dump kernel before | |
dc009d92 EB |
1291 | * we corrupt it. |
1292 | */ | |
518a0c71 | 1293 | |
dc009d92 | 1294 | kimage_free(xchg(&kexec_crash_image, NULL)); |
255aedd9 VG |
1295 | result = kimage_alloc_init(&image, entry, nr_segments, |
1296 | segments, flags); | |
558df720 | 1297 | crash_map_reserved_pages(); |
518a0c71 GL |
1298 | } else { |
1299 | /* Loading another kernel to reboot into. */ | |
1300 | ||
1301 | result = kimage_alloc_init(&image, entry, nr_segments, | |
1302 | segments, flags); | |
dc009d92 | 1303 | } |
72414d3f | 1304 | if (result) |
dc009d92 | 1305 | goto out; |
72414d3f | 1306 | |
3ab83521 HY |
1307 | if (flags & KEXEC_PRESERVE_CONTEXT) |
1308 | image->preserve_context = 1; | |
dc009d92 | 1309 | result = machine_kexec_prepare(image); |
72414d3f | 1310 | if (result) |
dc009d92 | 1311 | goto out; |
72414d3f MS |
1312 | |
1313 | for (i = 0; i < nr_segments; i++) { | |
dc009d92 | 1314 | result = kimage_load_segment(image, &image->segment[i]); |
72414d3f | 1315 | if (result) |
dc009d92 | 1316 | goto out; |
dc009d92 | 1317 | } |
7fccf032 | 1318 | kimage_terminate(image); |
558df720 MH |
1319 | if (flags & KEXEC_ON_CRASH) |
1320 | crash_unmap_reserved_pages(); | |
dc009d92 EB |
1321 | } |
1322 | /* Install the new kernel and uninstall the old */ | |
1323 | image = xchg(dest_image, image); | |
1324 | ||
72414d3f | 1325 | out: |
8c5a1cf0 | 1326 | mutex_unlock(&kexec_mutex); |
dc009d92 | 1327 | kimage_free(image); |
72414d3f | 1328 | |
dc009d92 EB |
1329 | return result; |
1330 | } | |
1331 | ||
558df720 MH |
1332 | /* |
1333 | * Add and remove page tables for crashkernel memory | |
1334 | * | |
1335 | * Provide an empty default implementation here -- architecture | |
1336 | * code may override this | |
1337 | */ | |
1338 | void __weak crash_map_reserved_pages(void) | |
1339 | {} | |
1340 | ||
1341 | void __weak crash_unmap_reserved_pages(void) | |
1342 | {} | |
1343 | ||
dc009d92 | 1344 | #ifdef CONFIG_COMPAT |
ca2c405a HC |
1345 | COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, |
1346 | compat_ulong_t, nr_segments, | |
1347 | struct compat_kexec_segment __user *, segments, | |
1348 | compat_ulong_t, flags) | |
dc009d92 EB |
1349 | { |
1350 | struct compat_kexec_segment in; | |
1351 | struct kexec_segment out, __user *ksegments; | |
1352 | unsigned long i, result; | |
1353 | ||
1354 | /* Don't allow clients that don't understand the native | |
1355 | * architecture to do anything. | |
1356 | */ | |
72414d3f | 1357 | if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) |
dc009d92 | 1358 | return -EINVAL; |
dc009d92 | 1359 | |
72414d3f | 1360 | if (nr_segments > KEXEC_SEGMENT_MAX) |
dc009d92 | 1361 | return -EINVAL; |
dc009d92 EB |
1362 | |
1363 | ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); | |
e1bebcf4 | 1364 | for (i = 0; i < nr_segments; i++) { |
dc009d92 | 1365 | result = copy_from_user(&in, &segments[i], sizeof(in)); |
72414d3f | 1366 | if (result) |
dc009d92 | 1367 | return -EFAULT; |
dc009d92 EB |
1368 | |
1369 | out.buf = compat_ptr(in.buf); | |
1370 | out.bufsz = in.bufsz; | |
1371 | out.mem = in.mem; | |
1372 | out.memsz = in.memsz; | |
1373 | ||
1374 | result = copy_to_user(&ksegments[i], &out, sizeof(out)); | |
72414d3f | 1375 | if (result) |
dc009d92 | 1376 | return -EFAULT; |
dc009d92 EB |
1377 | } |
1378 | ||
1379 | return sys_kexec_load(entry, nr_segments, ksegments, flags); | |
1380 | } | |
1381 | #endif | |
1382 | ||
74ca317c | 1383 | #ifdef CONFIG_KEXEC_FILE |
f0895685 VG |
1384 | SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, |
1385 | unsigned long, cmdline_len, const char __user *, cmdline_ptr, | |
1386 | unsigned long, flags) | |
1387 | { | |
cb105258 VG |
1388 | int ret = 0, i; |
1389 | struct kimage **dest_image, *image; | |
1390 | ||
1391 | /* We only trust the superuser with rebooting the system. */ | |
1392 | if (!capable(CAP_SYS_BOOT) || kexec_load_disabled) | |
1393 | return -EPERM; | |
1394 | ||
1395 | /* Make sure we have a legal set of flags */ | |
1396 | if (flags != (flags & KEXEC_FILE_FLAGS)) | |
1397 | return -EINVAL; | |
1398 | ||
1399 | image = NULL; | |
1400 | ||
1401 | if (!mutex_trylock(&kexec_mutex)) | |
1402 | return -EBUSY; | |
1403 | ||
1404 | dest_image = &kexec_image; | |
1405 | if (flags & KEXEC_FILE_ON_CRASH) | |
1406 | dest_image = &kexec_crash_image; | |
1407 | ||
1408 | if (flags & KEXEC_FILE_UNLOAD) | |
1409 | goto exchange; | |
1410 | ||
1411 | /* | |
1412 | * In case of crash, the new kernel gets loaded in the reserved region. It is | |
1413 | * the same memory where the old crash kernel might be loaded. Free any | |
1414 | * current crash dump kernel before we corrupt it. | |
1415 | */ | |
1416 | if (flags & KEXEC_FILE_ON_CRASH) | |
1417 | kimage_free(xchg(&kexec_crash_image, NULL)); | |
1418 | ||
1419 | ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr, | |
1420 | cmdline_len, flags); | |
1421 | if (ret) | |
1422 | goto out; | |
1423 | ||
1424 | ret = machine_kexec_prepare(image); | |
1425 | if (ret) | |
1426 | goto out; | |
1427 | ||
12db5562 VG |
1428 | ret = kexec_calculate_store_digests(image); |
1429 | if (ret) | |
1430 | goto out; | |
1431 | ||
cb105258 VG |
1432 | for (i = 0; i < image->nr_segments; i++) { |
1433 | struct kexec_segment *ksegment; | |
1434 | ||
1435 | ksegment = &image->segment[i]; | |
1436 | pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n", | |
1437 | i, ksegment->buf, ksegment->bufsz, ksegment->mem, | |
1438 | ksegment->memsz); | |
1439 | ||
1440 | ret = kimage_load_segment(image, &image->segment[i]); | |
1441 | if (ret) | |
1442 | goto out; | |
1443 | } | |
1444 | ||
1445 | kimage_terminate(image); | |
1446 | ||
1447 | /* | |
1448 | * Free up any temporary buffers allocated which are not needed | |
1449 | * after image has been loaded | |
1450 | */ | |
1451 | kimage_file_post_load_cleanup(image); | |
1452 | exchange: | |
1453 | image = xchg(dest_image, image); | |
1454 | out: | |
1455 | mutex_unlock(&kexec_mutex); | |
1456 | kimage_free(image); | |
1457 | return ret; | |
f0895685 VG |
1458 | } |
1459 | ||
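The file-based variant shifts segment preparation into the kernel, so a caller only supplies file descriptors. A hedged sketch of a userspace call (SYS_kexec_file_load is only defined on architectures that wire the syscall up; the file names here are placeholders):

```c
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kexec.h>

static int file_load_example(const char *kernel_path,
			     const char *initrd_path,
			     const char *cmdline, unsigned long cmdline_len)
{
	int kfd = open(kernel_path, O_RDONLY);
	int ifd = open(initrd_path, O_RDONLY);

	if (kfd < 0 || ifd < 0)
		return -1;

	/* cmdline_len must count the trailing NUL byte, matching the
	 * check in kimage_file_prepare_segments() above. */
	return syscall(SYS_kexec_file_load, kfd, ifd, cmdline_len,
		       cmdline, 0UL);
}
```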
74ca317c VG |
1460 | #endif /* CONFIG_KEXEC_FILE */ |
1461 | ||
6e274d14 | 1462 | void crash_kexec(struct pt_regs *regs) |
dc009d92 | 1463 | { |
8c5a1cf0 | 1464 | /* Take the kexec_mutex here to prevent sys_kexec_load |
dc009d92 EB |
1465 | * running on one cpu from replacing the crash kernel |
1466 | * we are using after a panic on a different cpu. | |
1467 | * | |
1468 | * If the crash kernel was not located in a fixed area | |
1469 | * of memory the xchg(&kexec_crash_image) would be | |
1470 | * sufficient. But since I reuse the memory... | |
1471 | */ | |
8c5a1cf0 | 1472 | if (mutex_trylock(&kexec_mutex)) { |
c0ce7d08 | 1473 | if (kexec_crash_image) { |
e996e581 | 1474 | struct pt_regs fixed_regs; |
0f4bd46e | 1475 | |
e996e581 | 1476 | crash_setup_regs(&fixed_regs, regs); |
fd59d231 | 1477 | crash_save_vmcoreinfo(); |
e996e581 | 1478 | machine_crash_shutdown(&fixed_regs); |
c0ce7d08 | 1479 | machine_kexec(kexec_crash_image); |
dc009d92 | 1480 | } |
8c5a1cf0 | 1481 | mutex_unlock(&kexec_mutex); |
dc009d92 EB |
1482 | } |
1483 | } | |
cc571658 | 1484 | |
06a7f711 AW |
1485 | size_t crash_get_memory_size(void) |
1486 | { | |
e05bd336 | 1487 | size_t size = 0; |
06a7f711 | 1488 | mutex_lock(&kexec_mutex); |
e05bd336 | 1489 | if (crashk_res.end != crashk_res.start) |
28f65c11 | 1490 | size = resource_size(&crashk_res); |
06a7f711 AW |
1491 | mutex_unlock(&kexec_mutex); |
1492 | return size; | |
1493 | } | |
1494 | ||
c0bb9e45 AB |
1495 | void __weak crash_free_reserved_phys_range(unsigned long begin, |
1496 | unsigned long end) | |
06a7f711 AW |
1497 | { |
1498 | unsigned long addr; | |
1499 | ||
e07cee23 JL |
1500 | for (addr = begin; addr < end; addr += PAGE_SIZE) |
1501 | free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT)); | |
06a7f711 AW |
1502 | } |
1503 | ||
1504 | int crash_shrink_memory(unsigned long new_size) | |
1505 | { | |
1506 | int ret = 0; | |
1507 | unsigned long start, end; | |
bec013c4 | 1508 | unsigned long old_size; |
6480e5a0 | 1509 | struct resource *ram_res; |
06a7f711 AW |
1510 | |
1511 | mutex_lock(&kexec_mutex); | |
1512 | ||
1513 | if (kexec_crash_image) { | |
1514 | ret = -ENOENT; | |
1515 | goto unlock; | |
1516 | } | |
1517 | start = crashk_res.start; | |
1518 | end = crashk_res.end; | |
bec013c4 MH |
1519 | old_size = (end == 0) ? 0 : end - start + 1; |
1520 | if (new_size >= old_size) { | |
1521 | ret = (new_size == old_size) ? 0 : -EINVAL; | |
06a7f711 AW |
1522 | goto unlock; |
1523 | } | |
1524 | ||
6480e5a0 MH |
1525 | ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL); |
1526 | if (!ram_res) { | |
1527 | ret = -ENOMEM; | |
1528 | goto unlock; | |
1529 | } | |
1530 | ||
558df720 MH |
1531 | start = roundup(start, KEXEC_CRASH_MEM_ALIGN); |
1532 | end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN); | |
06a7f711 | 1533 | |
558df720 | 1534 | crash_map_reserved_pages(); |
c0bb9e45 | 1535 | crash_free_reserved_phys_range(end, crashk_res.end); |
06a7f711 | 1536 | |
e05bd336 | 1537 | if ((start == end) && (crashk_res.parent != NULL)) |
06a7f711 | 1538 | release_resource(&crashk_res); |
6480e5a0 MH |
1539 | |
1540 | ram_res->start = end; | |
1541 | ram_res->end = crashk_res.end; | |
1542 | ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | |
1543 | ram_res->name = "System RAM"; | |
1544 | ||
475f9aa6 | 1545 | crashk_res.end = end - 1; |
6480e5a0 MH |
1546 | |
1547 | insert_resource(&iomem_resource, ram_res); | |
558df720 | 1548 | crash_unmap_reserved_pages(); |
06a7f711 AW |
1549 | |
1550 | unlock: | |
1551 | mutex_unlock(&kexec_mutex); | |
1552 | return ret; | |
1553 | } | |
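/*
 * These two helpers back /sys/kernel/kexec_crash_size (see
 * kernel/ksysfs.c). A shrink from user space looks like:
 *
 *	# cat /sys/kernel/kexec_crash_size
 *	134217728
 *	# echo 67108864 > /sys/kernel/kexec_crash_size
 *
 * which ends up in crash_shrink_memory(67108864). Growing the area
 * back is rejected with -EINVAL, since the freed pages have already
 * been handed to the page allocator.
 */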
1554 | ||
85916f81 MD |
1555 | static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, |
1556 | size_t data_len) | |
1557 | { | |
1558 | struct elf_note note; | |
1559 | ||
1560 | note.n_namesz = strlen(name) + 1; | |
1561 | note.n_descsz = data_len; | |
1562 | note.n_type = type; | |
1563 | memcpy(buf, ¬e, sizeof(note)); | |
1564 | buf += (sizeof(note) + 3)/4; | |
1565 | memcpy(buf, name, note.n_namesz); | |
1566 | buf += (note.n_namesz + 3)/4; | |
1567 | memcpy(buf, data, note.n_descsz); | |
1568 | buf += (note.n_descsz + 3)/4; | |
1569 | ||
1570 | return buf; | |
1571 | } | |
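/*
 * The layout produced above is a standard ELF note: an Elf_Nhdr-style
 * header followed by the name and the descriptor, each padded to a
 * 4-byte boundary. For example, an NT_PRSTATUS note with name "CORE"
 * (n_namesz == 5) occupies:
 *
 *	buf[0..2]   n_namesz, n_descsz, n_type
 *	buf[3..4]   "CORE\0" plus 3 bytes of padding
 *	buf[5.. ]   prstatus data, padded to a u32 boundary
 */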
1572 | ||
1573 | static void final_note(u32 *buf) | |
1574 | { | |
1575 | struct elf_note note; | |
1576 | ||
1577 | note.n_namesz = 0; | |
1578 | note.n_descsz = 0; | |
1579 | note.n_type = 0; | |
1580 | memcpy(buf, ¬e, sizeof(note)); | |
1581 | } | |
1582 | ||
1583 | void crash_save_cpu(struct pt_regs *regs, int cpu) | |
1584 | { | |
1585 | struct elf_prstatus prstatus; | |
1586 | u32 *buf; | |
1587 | ||
4f4b6c1a | 1588 | if ((cpu < 0) || (cpu >= nr_cpu_ids)) |
85916f81 MD |
1589 | return; |
1590 | ||
1591 | /* Using ELF notes here is opportunistic. | |
1592 | * I need a well-defined structure format | |
1593 | * for the data I pass, and I need tags | |
1594 | * on the data to indicate what information I have | |
1595 | * squirrelled away. ELF notes happen to provide | |
1596 | * all of that, so there is no need to invent something new. | |
1597 | */ | |
e1bebcf4 | 1598 | buf = (u32 *)per_cpu_ptr(crash_notes, cpu); |
85916f81 MD |
1599 | if (!buf) |
1600 | return; | |
1601 | memset(&prstatus, 0, sizeof(prstatus)); | |
1602 | prstatus.pr_pid = current->pid; | |
6cd61c0b | 1603 | elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); |
6672f76a | 1604 | buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, |
e1bebcf4 | 1605 | &prstatus, sizeof(prstatus)); |
85916f81 MD |
1606 | final_note(buf); |
1607 | } | |
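/*
 * The physical address of each CPU's note buffer is exported via
 * /sys/devices/system/cpu/cpuN/crash_notes (see drivers/base/cpu.c),
 * which is how kexec-tools locates these buffers when it builds the
 * ELF core header for the crash kernel.
 */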
1608 | ||
cc571658 VG |
1609 | static int __init crash_notes_memory_init(void) |
1610 | { | |
1611 | /* Allocate memory for saving cpu registers. */ | |
1612 | crash_notes = alloc_percpu(note_buf_t); | |
1613 | if (!crash_notes) { | |
e1bebcf4 | 1614 | pr_warn("Kexec: Memory allocation for saving cpu register states failed\n"); |
cc571658 VG |
1615 | return -ENOMEM; |
1616 | } | |
1617 | return 0; | |
1618 | } | |
c96d6660 | 1619 | subsys_initcall(crash_notes_memory_init); |
fd59d231 | 1620 | |
cba63c30 BW |
1621 | |
1622 | /* | |
1623 | * Parsing of the "crashkernel" command line | |
1624 | * | |
1625 | * This code is intended to be called from architecture-specific code. | |
1626 | */ | |
1627 | ||
1628 | ||
1629 | /* | |
1630 | * This function parses command lines in the format | |
1631 | * | |
1632 | * crashkernel=ramsize-range:size[,...][@offset] | |
1633 | * | |
1634 | * The function returns 0 on success and -EINVAL on failure. | |
1635 | */ | |
e1bebcf4 FF |
1636 | static int __init parse_crashkernel_mem(char *cmdline, |
1637 | unsigned long long system_ram, | |
1638 | unsigned long long *crash_size, | |
1639 | unsigned long long *crash_base) | |
cba63c30 BW |
1640 | { |
1641 | char *cur = cmdline, *tmp; | |
1642 | ||
1643 | /* for each entry of the comma-separated list */ | |
1644 | do { | |
1645 | unsigned long long start, end = ULLONG_MAX, size; | |
1646 | ||
1647 | /* get the start of the range */ | |
1648 | start = memparse(cur, &tmp); | |
1649 | if (cur == tmp) { | |
e1bebcf4 | 1650 | pr_warn("crashkernel: Memory value expected\n"); |
cba63c30 BW |
1651 | return -EINVAL; |
1652 | } | |
1653 | cur = tmp; | |
1654 | if (*cur != '-') { | |
e1bebcf4 | 1655 | pr_warn("crashkernel: '-' expected\n"); |
cba63c30 BW |
1656 | return -EINVAL; |
1657 | } | |
1658 | cur++; | |
1659 | ||
1660 | /* if no ':' is here, then we read the end */ | |
1661 | if (*cur != ':') { | |
1662 | end = memparse(cur, &tmp); | |
1663 | if (cur == tmp) { | |
e1bebcf4 | 1664 | pr_warn("crashkernel: Memory value expected\n"); |
cba63c30 BW |
1665 | return -EINVAL; |
1666 | } | |
1667 | cur = tmp; | |
1668 | if (end <= start) { | |
e1bebcf4 | 1669 | pr_warn("crashkernel: end <= start\n"); |
cba63c30 BW |
1670 | return -EINVAL; |
1671 | } | |
1672 | } | |
1673 | ||
1674 | if (*cur != ':') { | |
e1bebcf4 | 1675 | pr_warn("crashkernel: ':' expected\n"); |
cba63c30 BW |
1676 | return -EINVAL; |
1677 | } | |
1678 | cur++; | |
1679 | ||
1680 | size = memparse(cur, &tmp); | |
1681 | if (cur == tmp) { | |
e1bebcf4 | 1682 | pr_warn("Memory value expected\n"); |
cba63c30 BW |
1683 | return -EINVAL; |
1684 | } | |
1685 | cur = tmp; | |
1686 | if (size >= system_ram) { | |
e1bebcf4 | 1687 | pr_warn("crashkernel: invalid size\n"); |
cba63c30 BW |
1688 | return -EINVAL; |
1689 | } | |
1690 | ||
1691 | /* does system_ram fall within this range? */ | |
be089d79 | 1692 | if (system_ram >= start && system_ram < end) { |
cba63c30 BW |
1693 | *crash_size = size; |
1694 | break; | |
1695 | } | |
1696 | } while (*cur++ == ','); | |
1697 | ||
1698 | if (*crash_size > 0) { | |
11c7da4b | 1699 | while (*cur && *cur != ' ' && *cur != '@') |
cba63c30 BW |
1700 | cur++; |
1701 | if (*cur == '@') { | |
1702 | cur++; | |
1703 | *crash_base = memparse(cur, &tmp); | |
1704 | if (cur == tmp) { | |
e1bebcf4 | 1705 | pr_warn("Memory value expected after '@'\n"); |
cba63c30 BW |
1706 | return -EINVAL; |
1707 | } | |
1708 | } | |
1709 | } | |
1710 | ||
1711 | return 0; | |
1712 | } | |
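/*
 * For illustration: on a machine with 1G of System RAM, the command
 * line
 *
 *	crashkernel=512M-2G:64M,2G-:128M
 *
 * matches the first range (512M <= 1G < 2G) and yields a crash size
 * of 64M; *crash_base is only set if an @offset is appended.
 */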
1713 | ||
1714 | /* | |
1715 | * This function parses "simple" (old) crashkernel command lines like | |
1716 | * | |
e1bebcf4 | 1717 | * crashkernel=size[@offset] |
cba63c30 BW |
1718 | * |
1719 | * It returns 0 on success and -EINVAL on failure. | |
1720 | */ | |
e1bebcf4 FF |
1721 | static int __init parse_crashkernel_simple(char *cmdline, |
1722 | unsigned long long *crash_size, | |
1723 | unsigned long long *crash_base) | |
cba63c30 BW |
1724 | { |
1725 | char *cur = cmdline; | |
1726 | ||
1727 | *crash_size = memparse(cmdline, &cur); | |
1728 | if (cmdline == cur) { | |
e1bebcf4 | 1729 | pr_warn("crashkernel: memory value expected\n"); |
cba63c30 BW |
1730 | return -EINVAL; |
1731 | } | |
1732 | ||
1733 | if (*cur == '@') | |
1734 | *crash_base = memparse(cur+1, &cur); | |
eaa3be6a | 1735 | else if (*cur != ' ' && *cur != '\0') { |
e1bebcf4 | 1736 | pr_warn("crashkernel: unrecognized char\n"); |
eaa3be6a ZD |
1737 | return -EINVAL; |
1738 | } | |
cba63c30 BW |
1739 | |
1740 | return 0; | |
1741 | } | |
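/*
 * Example: "crashkernel=128M@16M" yields *crash_size = 128M and
 * *crash_base = 16M, while plain "crashkernel=128M" leaves
 * *crash_base untouched so the architecture picks a base on its own.
 */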
1742 | ||
adbc742b YL |
1743 | #define SUFFIX_HIGH 0 |
1744 | #define SUFFIX_LOW 1 | |
1745 | #define SUFFIX_NULL 2 | |
1746 | static __initdata char *suffix_tbl[] = { | |
1747 | [SUFFIX_HIGH] = ",high", | |
1748 | [SUFFIX_LOW] = ",low", | |
1749 | [SUFFIX_NULL] = NULL, | |
1750 | }; | |
1751 | ||
cba63c30 | 1752 | /* |
adbc742b YL |
1753 | * This function parses "suffix" crashkernel command lines like | |
1754 | * | |
1755 | * crashkernel=size,[high|low] | |
1756 | * | |
1757 | * It returns 0 on success and -EINVAL on failure. | |
cba63c30 | 1758 | */ |
adbc742b YL |
1759 | static int __init parse_crashkernel_suffix(char *cmdline, |
1760 | unsigned long long *crash_size, | |
adbc742b YL |
1761 | const char *suffix) |
1762 | { | |
1763 | char *cur = cmdline; | |
1764 | ||
1765 | *crash_size = memparse(cmdline, &cur); | |
1766 | if (cmdline == cur) { | |
1767 | pr_warn("crashkernel: memory value expected\n"); | |
1768 | return -EINVAL; | |
1769 | } | |
1770 | ||
1771 | /* the size must be immediately followed by the expected suffix */ | |
1772 | if (strncmp(cur, suffix, strlen(suffix))) { | |
1773 | pr_warn("crashkernel: unrecognized char\n"); | |
1774 | return -EINVAL; | |
1775 | } | |
1776 | cur += strlen(suffix); | |
1777 | if (*cur != ' ' && *cur != '\0') { | |
1778 | pr_warn("crashkernel: unrecognized char\n"); | |
1779 | return -EINVAL; | |
1780 | } | |
1781 | ||
1782 | return 0; | |
1783 | } | |
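/*
 * Example: for "crashkernel=256M,high" this is called with
 * cmdline == "256M,high" and suffix == ",high" and yields
 * *crash_size = 256M; anything other than a space or the string end
 * after the suffix fails with -EINVAL.
 */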
1784 | ||
1785 | static __init char *get_last_crashkernel(char *cmdline, | |
1786 | const char *name, | |
1787 | const char *suffix) | |
1788 | { | |
1789 | char *p = cmdline, *ck_cmdline = NULL; | |
1790 | ||
1791 | /* find crashkernel and use the last one if there are more */ | |
1792 | p = strstr(p, name); | |
1793 | while (p) { | |
1794 | char *end_p = strchr(p, ' '); | |
1795 | char *q; | |
1796 | ||
1797 | if (!end_p) | |
1798 | end_p = p + strlen(p); | |
1799 | ||
1800 | if (!suffix) { | |
1801 | int i; | |
1802 | ||
1803 | /* skip the one with any known suffix */ | |
1804 | for (i = 0; suffix_tbl[i]; i++) { | |
1805 | q = end_p - strlen(suffix_tbl[i]); | |
1806 | if (!strncmp(q, suffix_tbl[i], | |
1807 | strlen(suffix_tbl[i]))) | |
1808 | goto next; | |
1809 | } | |
1810 | ck_cmdline = p; | |
1811 | } else { | |
1812 | q = end_p - strlen(suffix); | |
1813 | if (!strncmp(q, suffix, strlen(suffix))) | |
1814 | ck_cmdline = p; | |
1815 | } | |
1816 | next: | |
1817 | p = strstr(p+1, name); | |
1818 | } | |
1819 | ||
1820 | if (!ck_cmdline) | |
1821 | return NULL; | |
1822 | ||
1823 | return ck_cmdline; | |
1824 | } | |
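/*
 * Example: given "crashkernel=128M crashkernel=256M,high", a lookup
 * with suffix == NULL skips the ",high" entry and returns the
 * "crashkernel=128M" one, while a lookup with suffix == ",high"
 * returns "crashkernel=256M,high".
 */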
1825 | ||
0212f915 | 1826 | static int __init __parse_crashkernel(char *cmdline, |
cba63c30 BW |
1827 | unsigned long long system_ram, |
1828 | unsigned long long *crash_size, | |
0212f915 | 1829 | unsigned long long *crash_base, |
adbc742b YL |
1830 | const char *name, |
1831 | const char *suffix) | |
cba63c30 | 1832 | { |
cba63c30 | 1833 | char *first_colon, *first_space; |
adbc742b | 1834 | char *ck_cmdline; |
cba63c30 BW |
1835 | |
1836 | BUG_ON(!crash_size || !crash_base); | |
1837 | *crash_size = 0; | |
1838 | *crash_base = 0; | |
1839 | ||
adbc742b | 1840 | ck_cmdline = get_last_crashkernel(cmdline, name, suffix); |
cba63c30 BW |
1841 | |
1842 | if (!ck_cmdline) | |
1843 | return -EINVAL; | |
1844 | ||
0212f915 | 1845 | ck_cmdline += strlen(name); |
cba63c30 | 1846 | |
adbc742b YL |
1847 | if (suffix) |
1848 | return parse_crashkernel_suffix(ck_cmdline, crash_size, | |
36f3f500 | 1849 | suffix); |
cba63c30 BW |
1850 | /* |
1851 | * if the commandline contains a ':', then that's the extended | |
1852 | * syntax -- if not, it must be the classic syntax | |
1853 | */ | |
1854 | first_colon = strchr(ck_cmdline, ':'); | |
1855 | first_space = strchr(ck_cmdline, ' '); | |
1856 | if (first_colon && (!first_space || first_colon < first_space)) | |
1857 | return parse_crashkernel_mem(ck_cmdline, system_ram, | |
1858 | crash_size, crash_base); | |
cba63c30 | 1859 | |
80c74f6a | 1860 | return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base); |
cba63c30 BW |
1861 | } |
1862 | ||
adbc742b YL |
1863 | /* |
1864 | * This function is the entry point for command line parsing and should be | |
1865 | * called from the arch-specific code. | |
1866 | */ | |
0212f915 YL |
1867 | int __init parse_crashkernel(char *cmdline, |
1868 | unsigned long long system_ram, | |
1869 | unsigned long long *crash_size, | |
1870 | unsigned long long *crash_base) | |
1871 | { | |
1872 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, | |
adbc742b | 1873 | "crashkernel=", NULL); |
0212f915 | 1874 | } |
55a20ee7 YL |
1875 | |
1876 | int __init parse_crashkernel_high(char *cmdline, | |
1877 | unsigned long long system_ram, | |
1878 | unsigned long long *crash_size, | |
1879 | unsigned long long *crash_base) | |
1880 | { | |
1881 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, | |
adbc742b | 1882 | "crashkernel=", suffix_tbl[SUFFIX_HIGH]); |
55a20ee7 | 1883 | } |
0212f915 YL |
1884 | |
1885 | int __init parse_crashkernel_low(char *cmdline, | |
1886 | unsigned long long system_ram, | |
1887 | unsigned long long *crash_size, | |
1888 | unsigned long long *crash_base) | |
1889 | { | |
1890 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, | |
adbc742b | 1891 | "crashkernel=", suffix_tbl[SUFFIX_LOW]); |
0212f915 | 1892 | } |
cba63c30 | 1893 | |
fa8ff292 | 1894 | static void update_vmcoreinfo_note(void) |
fd59d231 | 1895 | { |
fa8ff292 | 1896 | u32 *buf = vmcoreinfo_note; |
fd59d231 KO |
1897 | |
1898 | if (!vmcoreinfo_size) | |
1899 | return; | |
fd59d231 KO |
1900 | buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data, |
1901 | vmcoreinfo_size); | |
fd59d231 KO |
1902 | final_note(buf); |
1903 | } | |
1904 | ||
fa8ff292 MH |
1905 | void crash_save_vmcoreinfo(void) |
1906 | { | |
63dca8d5 | 1907 | vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds()); |
fa8ff292 MH |
1908 | update_vmcoreinfo_note(); |
1909 | } | |
1910 | ||
fd59d231 KO |
1911 | void vmcoreinfo_append_str(const char *fmt, ...) |
1912 | { | |
1913 | va_list args; | |
1914 | char buf[0x50]; | |
310faaa9 | 1915 | size_t r; |
fd59d231 KO |
1916 | |
1917 | va_start(args, fmt); | |
a19428e5 | 1918 | r = vscnprintf(buf, sizeof(buf), fmt, args); |
fd59d231 KO |
1919 | va_end(args); |
1920 | ||
31c3a3fe | 1921 | r = min(r, vmcoreinfo_max_size - vmcoreinfo_size); |
fd59d231 KO |
1922 | |
1923 | memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); | |
1924 | ||
1925 | vmcoreinfo_size += r; | |
1926 | } | |
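/*
 * The VMCOREINFO_* macros used below are thin wrappers around this
 * function; for instance, VMCOREINFO_SYMBOL() expands (per
 * include/linux/kexec.h, shown here for illustration) roughly to:
 *
 *	#define VMCOREINFO_SYMBOL(name) \
 *		vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, \
 *				      (unsigned long)&name)
 *
 * so each invocation appends one "KEY=value" line to the note that
 * tools such as makedumpfile parse out of the dump.
 */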
1927 | ||
1928 | /* | |
1929 | * provide an empty default implementation here -- architecture | |
1930 | * code may override this | |
1931 | */ | |
52f5684c | 1932 | void __weak arch_crash_save_vmcoreinfo(void) |
fd59d231 KO |
1933 | {} |
1934 | ||
52f5684c | 1935 | unsigned long __weak paddr_vmcoreinfo_note(void) |
fd59d231 KO |
1936 | { |
1937 | return __pa((unsigned long)(char *)&vmcoreinfo_note); | |
1938 | } | |
1939 | ||
1940 | static int __init crash_save_vmcoreinfo_init(void) | |
1941 | { | |
bba1f603 KO |
1942 | VMCOREINFO_OSRELEASE(init_uts_ns.name.release); |
1943 | VMCOREINFO_PAGESIZE(PAGE_SIZE); | |
fd59d231 | 1944 | |
bcbba6c1 KO |
1945 | VMCOREINFO_SYMBOL(init_uts_ns); |
1946 | VMCOREINFO_SYMBOL(node_online_map); | |
d034cfab | 1947 | #ifdef CONFIG_MMU |
bcbba6c1 | 1948 | VMCOREINFO_SYMBOL(swapper_pg_dir); |
d034cfab | 1949 | #endif |
bcbba6c1 | 1950 | VMCOREINFO_SYMBOL(_stext); |
f1c4069e | 1951 | VMCOREINFO_SYMBOL(vmap_area_list); |
fd59d231 KO |
1952 | |
1953 | #ifndef CONFIG_NEED_MULTIPLE_NODES | |
bcbba6c1 KO |
1954 | VMCOREINFO_SYMBOL(mem_map); |
1955 | VMCOREINFO_SYMBOL(contig_page_data); | |
fd59d231 KO |
1956 | #endif |
1957 | #ifdef CONFIG_SPARSEMEM | |
bcbba6c1 KO |
1958 | VMCOREINFO_SYMBOL(mem_section); |
1959 | VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); | |
c76f860c | 1960 | VMCOREINFO_STRUCT_SIZE(mem_section); |
bcbba6c1 | 1961 | VMCOREINFO_OFFSET(mem_section, section_mem_map); |
fd59d231 | 1962 | #endif |
c76f860c KO |
1963 | VMCOREINFO_STRUCT_SIZE(page); |
1964 | VMCOREINFO_STRUCT_SIZE(pglist_data); | |
1965 | VMCOREINFO_STRUCT_SIZE(zone); | |
1966 | VMCOREINFO_STRUCT_SIZE(free_area); | |
1967 | VMCOREINFO_STRUCT_SIZE(list_head); | |
1968 | VMCOREINFO_SIZE(nodemask_t); | |
bcbba6c1 KO |
1969 | VMCOREINFO_OFFSET(page, flags); |
1970 | VMCOREINFO_OFFSET(page, _count); | |
1971 | VMCOREINFO_OFFSET(page, mapping); | |
1972 | VMCOREINFO_OFFSET(page, lru); | |
8d67091e AK |
1973 | VMCOREINFO_OFFSET(page, _mapcount); |
1974 | VMCOREINFO_OFFSET(page, private); | |
bcbba6c1 KO |
1975 | VMCOREINFO_OFFSET(pglist_data, node_zones); |
1976 | VMCOREINFO_OFFSET(pglist_data, nr_zones); | |
fd59d231 | 1977 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
bcbba6c1 | 1978 | VMCOREINFO_OFFSET(pglist_data, node_mem_map); |
fd59d231 | 1979 | #endif |
bcbba6c1 KO |
1980 | VMCOREINFO_OFFSET(pglist_data, node_start_pfn); |
1981 | VMCOREINFO_OFFSET(pglist_data, node_spanned_pages); | |
1982 | VMCOREINFO_OFFSET(pglist_data, node_id); | |
1983 | VMCOREINFO_OFFSET(zone, free_area); | |
1984 | VMCOREINFO_OFFSET(zone, vm_stat); | |
1985 | VMCOREINFO_OFFSET(zone, spanned_pages); | |
1986 | VMCOREINFO_OFFSET(free_area, free_list); | |
1987 | VMCOREINFO_OFFSET(list_head, next); | |
1988 | VMCOREINFO_OFFSET(list_head, prev); | |
13ba3fcb AK |
1989 | VMCOREINFO_OFFSET(vmap_area, va_start); |
1990 | VMCOREINFO_OFFSET(vmap_area, list); | |
bcbba6c1 | 1991 | VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); |
04d491ab | 1992 | log_buf_kexec_setup(); |
83a08e7c | 1993 | VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); |
bcbba6c1 | 1994 | VMCOREINFO_NUMBER(NR_FREE_PAGES); |
122c7a59 KO |
1995 | VMCOREINFO_NUMBER(PG_lru); |
1996 | VMCOREINFO_NUMBER(PG_private); | |
1997 | VMCOREINFO_NUMBER(PG_swapcache); | |
8d67091e | 1998 | VMCOREINFO_NUMBER(PG_slab); |
0d0bf667 MT |
1999 | #ifdef CONFIG_MEMORY_FAILURE |
2000 | VMCOREINFO_NUMBER(PG_hwpoison); | |
2001 | #endif | |
b3acc56b | 2002 | VMCOREINFO_NUMBER(PG_head_mask); |
8d67091e | 2003 | VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); |
3a1122d2 | 2004 | #ifdef CONFIG_HUGETLBFS |
8f1d26d0 | 2005 | VMCOREINFO_SYMBOL(free_huge_page); |
3a1122d2 | 2006 | #endif |
fd59d231 KO |
2007 | |
2008 | arch_crash_save_vmcoreinfo(); | |
fa8ff292 | 2009 | update_vmcoreinfo_note(); |
fd59d231 KO |
2010 | |
2011 | return 0; | |
2012 | } | |
2013 | ||
c96d6660 | 2014 | subsys_initcall(crash_save_vmcoreinfo_init); |
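/*
 * The assembled note is a flat list of "KEY=value" lines; a
 * representative (illustrative, not literal) excerpt:
 *
 *	OSRELEASE=3.18.0
 *	PAGESIZE=4096
 *	SYMBOL(init_uts_ns)=ffffffff81a11420
 *	OFFSET(page.flags)=0
 *	NUMBER(NR_FREE_PAGES)=0
 *	CRASHTIME=1418035200
 */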
3ab83521 | 2015 | |
74ca317c | 2016 | #ifdef CONFIG_KEXEC_FILE |
cb105258 VG |
2017 | static int locate_mem_hole_top_down(unsigned long start, unsigned long end, |
2018 | struct kexec_buf *kbuf) | |
2019 | { | |
2020 | struct kimage *image = kbuf->image; | |
2021 | unsigned long temp_start, temp_end; | |
2022 | ||
2023 | temp_end = min(end, kbuf->buf_max); | |
2024 | temp_start = temp_end - kbuf->memsz; | |
2025 | ||
2026 | do { | |
2027 | /* align down start */ | |
2028 | temp_start = temp_start & (~(kbuf->buf_align - 1)); | |
2029 | ||
2030 | if (temp_start < start || temp_start < kbuf->buf_min) | |
2031 | return 0; | |
2032 | ||
2033 | temp_end = temp_start + kbuf->memsz - 1; | |
2034 | ||
2035 | /* | |
2036 | * Make sure this does not conflict with any of the existing | |
2037 | * segments | |
2038 | */ | |
2039 | if (kimage_is_destination_range(image, temp_start, temp_end)) { | |
2040 | temp_start = temp_start - PAGE_SIZE; | |
2041 | continue; | |
2042 | } | |
2043 | ||
2044 | /* We found a suitable memory range */ | |
2045 | break; | |
2046 | } while (1); | |
2047 | ||
2048 | /* If we are here, we found a suitable memory range */ | |
669280a1 | 2049 | kbuf->mem = temp_start; |
cb105258 VG |
2050 | |
2051 | /* Success, stop navigating through remaining System RAM ranges */ | |
2052 | return 1; | |
2053 | } | |
2054 | ||
2055 | static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end, | |
2056 | struct kexec_buf *kbuf) | |
2057 | { | |
2058 | struct kimage *image = kbuf->image; | |
2059 | unsigned long temp_start, temp_end; | |
2060 | ||
2061 | temp_start = max(start, kbuf->buf_min); | |
2062 | ||
2063 | do { | |
2064 | temp_start = ALIGN(temp_start, kbuf->buf_align); | |
2065 | temp_end = temp_start + kbuf->memsz - 1; | |
2066 | ||
2067 | if (temp_end > end || temp_end > kbuf->buf_max) | |
2068 | return 0; | |
2069 | /* | |
2070 | * Make sure this does not conflict with any of the existing | |
2071 | * segments | |
2072 | */ | |
2073 | if (kimage_is_destination_range(image, temp_start, temp_end)) { | |
2074 | temp_start = temp_start + PAGE_SIZE; | |
2075 | continue; | |
2076 | } | |
2077 | ||
2078 | /* We found a suitable memory range */ | |
2079 | break; | |
2080 | } while (1); | |
2081 | ||
2082 | /* If we are here, we found a suitable memory range */ | |
669280a1 | 2083 | kbuf->mem = temp_start; |
cb105258 VG |
2084 | |
2085 | /* Success, stop navigating through remaining System RAM ranges */ | |
2086 | return 1; | |
2087 | } | |
2088 | ||
2089 | static int locate_mem_hole_callback(u64 start, u64 end, void *arg) | |
2090 | { | |
2091 | struct kexec_buf *kbuf = (struct kexec_buf *)arg; | |
2092 | unsigned long sz = end - start + 1; | |
2093 | ||
2094 | /* Returning 0 moves the walk on to the next memory range */ | |
2095 | if (sz < kbuf->memsz) | |
2096 | return 0; | |
2097 | ||
2098 | if (end < kbuf->buf_min || start > kbuf->buf_max) | |
2099 | return 0; | |
2100 | ||
2101 | /* | |
2102 | * Allocate memory top-down within the RAM range; otherwise, | |
2103 | * allocate bottom-up. | |
2104 | */ | |
2105 | if (kbuf->top_down) | |
2106 | return locate_mem_hole_top_down(start, end, kbuf); | |
2107 | return locate_mem_hole_bottom_up(start, end, kbuf); | |
2108 | } | |
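/*
 * walk_system_ram_res()/walk_iomem_res() call this back once per
 * matching resource and stop walking as soon as the callback returns
 * non-zero; that is why the helpers above return 1 on success and
 * kexec_add_buffer() below treats ret == 1 as "hole found".
 */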
2109 | ||
2110 | /* | |
2111 | * Helper function for placing a buffer in a kexec segment. This assumes | |
2112 | * that kexec_mutex is held. | |
2113 | */ | |
2114 | int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, | |
2115 | unsigned long memsz, unsigned long buf_align, | |
2116 | unsigned long buf_min, unsigned long buf_max, | |
2117 | bool top_down, unsigned long *load_addr) | |
2118 | { | |
2119 | ||
2120 | struct kexec_segment *ksegment; | |
2121 | struct kexec_buf buf, *kbuf; | |
2122 | int ret; | |
2123 | ||
2124 | /* Currently, adding a segment this way is allowed only in file mode */ | |
2125 | if (!image->file_mode) | |
2126 | return -EINVAL; | |
2127 | ||
2128 | if (image->nr_segments >= KEXEC_SEGMENT_MAX) | |
2129 | return -EINVAL; | |
2130 | ||
2131 | /* | |
2132 | * Make sure we are not trying to add a buffer after allocating | |
2133 | * control pages. All segments need to be placed before any | |
2134 | * control pages are allocated, because the control page allocation | |
2135 | * logic goes through the list of segments to make sure there are | |
2136 | * no destination overlaps. | |
2137 | */ | |
2138 | if (!list_empty(&image->control_pages)) { | |
2139 | WARN_ON(1); | |
2140 | return -EINVAL; | |
2141 | } | |
2142 | ||
2143 | memset(&buf, 0, sizeof(struct kexec_buf)); | |
2144 | kbuf = &buf; | |
2145 | kbuf->image = image; | |
2146 | kbuf->buffer = buffer; | |
2147 | kbuf->bufsz = bufsz; | |
2148 | ||
2149 | kbuf->memsz = ALIGN(memsz, PAGE_SIZE); | |
2150 | kbuf->buf_align = max(buf_align, PAGE_SIZE); | |
2151 | kbuf->buf_min = buf_min; | |
2152 | kbuf->buf_max = buf_max; | |
2153 | kbuf->top_down = top_down; | |
2154 | ||
2155 | /* Walk the RAM ranges and allocate a suitable range for the buffer */ | |
dd5f7260 VG |
2156 | if (image->type == KEXEC_TYPE_CRASH) |
2157 | ret = walk_iomem_res("Crash kernel", | |
2158 | IORESOURCE_MEM | IORESOURCE_BUSY, | |
2159 | crashk_res.start, crashk_res.end, kbuf, | |
2160 | locate_mem_hole_callback); | |
2161 | else | |
2162 | ret = walk_system_ram_res(0, -1, kbuf, | |
2163 | locate_mem_hole_callback); | |
cb105258 VG |
2164 | if (ret != 1) { |
2165 | /* A suitable memory range could not be found for buffer */ | |
2166 | return -EADDRNOTAVAIL; | |
2167 | } | |
2168 | ||
2169 | /* Found a suitable memory range */ | |
669280a1 BH |
2170 | ksegment = &image->segment[image->nr_segments]; |
2171 | ksegment->kbuf = kbuf->buffer; | |
2172 | ksegment->bufsz = kbuf->bufsz; | |
2173 | ksegment->mem = kbuf->mem; | |
2174 | ksegment->memsz = kbuf->memsz; | |
2175 | image->nr_segments++; | |
cb105258 VG |
2176 | *load_addr = ksegment->mem; |
2177 | return 0; | |
2178 | } | |
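/*
 * A typical call from an architecture image loader (all identifiers
 * below are illustrative, not taken from a specific loader):
 *
 *	unsigned long kernel_load_addr;
 *
 *	ret = kexec_add_buffer(image, kernel_buf, kernel_len,
 *			       kernel_memsz, PAGE_SIZE, min_addr,
 *			       max_addr, false, &kernel_load_addr);
 *	if (ret)
 *		return ret;
 */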
2179 | ||
12db5562 VG |
2180 | /* Calculate and store the digest of segments */ |
2181 | static int kexec_calculate_store_digests(struct kimage *image) | |
2182 | { | |
2183 | struct crypto_shash *tfm; | |
2184 | struct shash_desc *desc; | |
2185 | int ret = 0, i, j, zero_buf_sz, sha_region_sz; | |
2186 | size_t desc_size, nullsz; | |
2187 | char *digest; | |
2188 | void *zero_buf; | |
2189 | struct kexec_sha_region *sha_regions; | |
2190 | struct purgatory_info *pi = &image->purgatory_info; | |
2191 | ||
2192 | zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT); | |
2193 | zero_buf_sz = PAGE_SIZE; | |
2194 | ||
2195 | tfm = crypto_alloc_shash("sha256", 0, 0); | |
2196 | if (IS_ERR(tfm)) { | |
2197 | ret = PTR_ERR(tfm); | |
2198 | goto out; | |
2199 | } | |
2200 | ||
2201 | desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); | |
2202 | desc = kzalloc(desc_size, GFP_KERNEL); | |
2203 | if (!desc) { | |
2204 | ret = -ENOMEM; | |
2205 | goto out_free_tfm; | |
2206 | } | |
2207 | ||
2208 | sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region); | |
2209 | sha_regions = vzalloc(sha_region_sz); | |
2210 | if (!sha_regions) | |
2211 | goto out_free_desc; | |
2212 | ||
2213 | desc->tfm = tfm; | |
2214 | desc->flags = 0; | |
2215 | ||
2216 | ret = crypto_shash_init(desc); | |
2217 | if (ret < 0) | |
2218 | goto out_free_sha_regions; | |
2219 | ||
2220 | digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); | |
2221 | if (!digest) { | |
2222 | ret = -ENOMEM; | |
2223 | goto out_free_sha_regions; | |
2224 | } | |
2225 | ||
2226 | for (j = i = 0; i < image->nr_segments; i++) { | |
2227 | struct kexec_segment *ksegment; | |
2228 | ||
2229 | ksegment = &image->segment[i]; | |
2230 | /* | |
2231 | * Skip purgatory as it will be modified once we put digest | |
2232 | * info in purgatory. | |
2233 | */ | |
2234 | if (ksegment->kbuf == pi->purgatory_buf) | |
2235 | continue; | |
2236 | ||
2237 | ret = crypto_shash_update(desc, ksegment->kbuf, | |
2238 | ksegment->bufsz); | |
2239 | if (ret) | |
2240 | break; | |
2241 | ||
2242 | /* | |
2243 | * Assume the rest of the buffer is filled with zeros and | |
2244 | * update digest accordingly. | |
2245 | */ | |
2246 | nullsz = ksegment->memsz - ksegment->bufsz; | |
2247 | while (nullsz) { | |
2248 | unsigned long bytes = nullsz; | |
2249 | ||
2250 | if (bytes > zero_buf_sz) | |
2251 | bytes = zero_buf_sz; | |
2252 | ret = crypto_shash_update(desc, zero_buf, bytes); | |
2253 | if (ret) | |
2254 | break; | |
2255 | nullsz -= bytes; | |
2256 | } | |
2257 | ||
2258 | if (ret) | |
2259 | break; | |
2260 | ||
2261 | sha_regions[j].start = ksegment->mem; | |
2262 | sha_regions[j].len = ksegment->memsz; | |
2263 | j++; | |
2264 | } | |
2265 | ||
2266 | if (!ret) { | |
2267 | ret = crypto_shash_final(desc, digest); | |
2268 | if (ret) | |
2269 | goto out_free_digest; | |
2270 | ret = kexec_purgatory_get_set_symbol(image, "sha_regions", | |
2271 | sha_regions, sha_region_sz, 0); | |
2272 | if (ret) | |
2273 | goto out_free_digest; | |
2274 | ||
2275 | ret = kexec_purgatory_get_set_symbol(image, "sha256_digest", | |
2276 | digest, SHA256_DIGEST_SIZE, 0); | |
2277 | if (ret) | |
2278 | goto out_free_digest; | |
2279 | } | |
2280 | ||
2281 | out_free_digest: | |
2282 | kfree(digest); | |
2283 | out_free_sha_regions: | |
2284 | vfree(sha_regions); | |
2285 | out_free_desc: | |
2286 | kfree(desc); | |
2287 | out_free_tfm: | |
2288 | crypto_free_shash(tfm); | |
2289 | out: | |
2290 | return ret; | |
2291 | } | |
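/*
 * The consumer of sha_regions/sha256_digest is the purgatory binary
 * itself (arch/x86/purgatory/ on x86): before jumping to the new
 * kernel it recomputes SHA-256 over each region and refuses to boot
 * on a mismatch. In rough outline (a sketch, not the literal
 * purgatory code):
 *
 *	sha256_init(&ctx);
 *	for (i = 0; i < ARRAY_SIZE(sha_regions); i++)
 *		sha256_update(&ctx, (void *)sha_regions[i].start,
 *			      sha_regions[i].len);
 *	sha256_final(&ctx, digest);
 *	if (memcmp(digest, sha256_digest, sizeof(digest)))
 *		return 1;
 */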
2292 | ||
2293 | /* Actually load purgatory. A lot of this code is taken from kexec-tools */ | |
2294 | static int __kexec_load_purgatory(struct kimage *image, unsigned long min, | |
2295 | unsigned long max, int top_down) | |
2296 | { | |
2297 | struct purgatory_info *pi = &image->purgatory_info; | |
2298 | unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad; | |
2299 | unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset; | |
2300 | unsigned char *buf_addr, *src; | |
2301 | int i, ret = 0, entry_sidx = -1; | |
2302 | const Elf_Shdr *sechdrs_c; | |
2303 | Elf_Shdr *sechdrs = NULL; | |
2304 | void *purgatory_buf = NULL; | |
2305 | ||
2306 | /* | |
2307 | * sechdrs_c points to the section headers in purgatory, which are | |
2308 | * read-only. No modifications allowed. | |
2309 | */ | |
2310 | sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff; | |
2311 | ||
2312 | /* | |
2313 | * We cannot modify sechdrs_c[] or its fields; they are read-only. | |
2314 | * Copy it over to a local copy in which we can store some temporary | |
2315 | * data and free it at the end. We need to modify the ->sh_addr and | |
2316 | * ->sh_offset fields to keep track of the permanent and temporary | |
2317 | * locations of sections. | |
2318 | */ | |
2319 | sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr)); | |
2320 | if (!sechdrs) | |
2321 | return -ENOMEM; | |
2322 | ||
2323 | memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr)); | |
2324 | ||
2325 | /* | |
2326 | * There are multiple copies of the sections: the first copy is | |
2327 | * embedded in the kernel in a read-only section. Some of these | |
2328 | * sections will be copied to a temporary buffer and relocated, and | |
2329 | * those sections will finally be copied to their final destination | |
2330 | * at segment load time. | |
2331 | * | |
2332 | * Use ->sh_offset to reflect a section's address in memory: it | |
2333 | * points to the original read-only copy if the section is not | |
2334 | * allocatable, and to the relocatable temporary copy otherwise. | |
2335 | * | |
2336 | * Use ->sh_addr to contain the final address of the section, where | |
2337 | * it will go at execution time. | |
2338 | */ | |
2339 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2340 | if (sechdrs[i].sh_type == SHT_NOBITS) | |
2341 | continue; | |
2342 | ||
2343 | sechdrs[i].sh_offset = (unsigned long)pi->ehdr + | |
2344 | sechdrs[i].sh_offset; | |
2345 | } | |
2346 | ||
2347 | /* | |
2348 | * Identify entry point section and make entry relative to section | |
2349 | * start. | |
2350 | */ | |
2351 | entry = pi->ehdr->e_entry; | |
2352 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2353 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | |
2354 | continue; | |
2355 | ||
2356 | if (!(sechdrs[i].sh_flags & SHF_EXECINSTR)) | |
2357 | continue; | |
2358 | ||
2359 | /* Make entry section relative */ | |
2360 | if (sechdrs[i].sh_addr <= pi->ehdr->e_entry && | |
2361 | ((sechdrs[i].sh_addr + sechdrs[i].sh_size) > | |
2362 | pi->ehdr->e_entry)) { | |
2363 | entry_sidx = i; | |
2364 | entry -= sechdrs[i].sh_addr; | |
2365 | break; | |
2366 | } | |
2367 | } | |
2368 | ||
2369 | /* Determine how much memory is needed to load relocatable object. */ | |
2370 | buf_align = 1; | |
2371 | bss_align = 1; | |
2372 | buf_sz = 0; | |
2373 | bss_sz = 0; | |
2374 | ||
2375 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2376 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | |
2377 | continue; | |
2378 | ||
2379 | align = sechdrs[i].sh_addralign; | |
2380 | if (sechdrs[i].sh_type != SHT_NOBITS) { | |
2381 | if (buf_align < align) | |
2382 | buf_align = align; | |
2383 | buf_sz = ALIGN(buf_sz, align); | |
2384 | buf_sz += sechdrs[i].sh_size; | |
2385 | } else { | |
2386 | /* bss section */ | |
2387 | if (bss_align < align) | |
2388 | bss_align = align; | |
2389 | bss_sz = ALIGN(bss_sz, align); | |
2390 | bss_sz += sechdrs[i].sh_size; | |
2391 | } | |
2392 | } | |
2393 | ||
2394 | /* Determine the bss padding required to align bss properly */ | |
2395 | bss_pad = 0; | |
2396 | if (buf_sz & (bss_align - 1)) | |
2397 | bss_pad = bss_align - (buf_sz & (bss_align - 1)); | |
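	/* Equivalently: bss_pad = ALIGN(buf_sz, bss_align) - buf_sz */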
2398 | ||
2399 | memsz = buf_sz + bss_pad + bss_sz; | |
2400 | ||
2401 | /* Allocate buffer for purgatory */ | |
2402 | purgatory_buf = vzalloc(buf_sz); | |
2403 | if (!purgatory_buf) { | |
2404 | ret = -ENOMEM; | |
2405 | goto out; | |
2406 | } | |
2407 | ||
2408 | if (buf_align < bss_align) | |
2409 | buf_align = bss_align; | |
2410 | ||
2411 | /* Add buffer to segment list */ | |
2412 | ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz, | |
2413 | buf_align, min, max, top_down, | |
2414 | &pi->purgatory_load_addr); | |
2415 | if (ret) | |
2416 | goto out; | |
2417 | ||
2418 | /* Load SHF_ALLOC sections */ | |
2419 | buf_addr = purgatory_buf; | |
2420 | load_addr = curr_load_addr = pi->purgatory_load_addr; | |
2421 | bss_addr = load_addr + buf_sz + bss_pad; | |
2422 | ||
2423 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2424 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | |
2425 | continue; | |
2426 | ||
2427 | align = sechdrs[i].sh_addralign; | |
2428 | if (sechdrs[i].sh_type != SHT_NOBITS) { | |
2429 | curr_load_addr = ALIGN(curr_load_addr, align); | |
2430 | offset = curr_load_addr - load_addr; | |
2431 | /* We already modified ->sh_offset to keep the source address */ | |
2432 | src = (char *) sechdrs[i].sh_offset; | |
2433 | memcpy(buf_addr + offset, src, sechdrs[i].sh_size); | |
2434 | ||
2435 | /* Store load address and source address of section */ | |
2436 | sechdrs[i].sh_addr = curr_load_addr; | |
2437 | ||
2438 | /* | |
2439 | * This section got copied to temporary buffer. Update | |
2440 | * ->sh_offset accordingly. | |
2441 | */ | |
2442 | sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset); | |
2443 | ||
2444 | /* Advance to the next address */ | |
2445 | curr_load_addr += sechdrs[i].sh_size; | |
2446 | } else { | |
2447 | bss_addr = ALIGN(bss_addr, align); | |
2448 | sechdrs[i].sh_addr = bss_addr; | |
2449 | bss_addr += sechdrs[i].sh_size; | |
2450 | } | |
2451 | } | |
2452 | ||
2453 | /* Update entry point based on load address of text section */ | |
2454 | if (entry_sidx >= 0) | |
2455 | entry += sechdrs[entry_sidx].sh_addr; | |
2456 | ||
2457 | /* Make kernel jump to purgatory after shutdown */ | |
2458 | image->start = entry; | |
2459 | ||
2460 | /* Used later to get/set symbol values */ | |
2461 | pi->sechdrs = sechdrs; | |
2462 | ||
2463 | /* | |
2464 | * Used later to identify which section is purgatory and skip it | |
2465 | * from checksumming. | |
2466 | */ | |
2467 | pi->purgatory_buf = purgatory_buf; | |
2468 | return ret; | |
2469 | out: | |
2470 | vfree(sechdrs); | |
2471 | vfree(purgatory_buf); | |
2472 | return ret; | |
2473 | } | |
2474 | ||
2475 | static int kexec_apply_relocations(struct kimage *image) | |
2476 | { | |
2477 | int i, ret; | |
2478 | struct purgatory_info *pi = &image->purgatory_info; | |
2479 | Elf_Shdr *sechdrs = pi->sechdrs; | |
2480 | ||
2481 | /* Apply relocations */ | |
2482 | for (i = 0; i < pi->ehdr->e_shnum; i++) { | |
2483 | Elf_Shdr *section, *symtab; | |
2484 | ||
2485 | if (sechdrs[i].sh_type != SHT_RELA && | |
2486 | sechdrs[i].sh_type != SHT_REL) | |
2487 | continue; | |
2488 | ||
2489 | /* | |
2490 | * For sections of type SHT_RELA/SHT_REL, ->sh_link contains the | |
2491 | * section header index of the associated symbol table, and | |
2492 | * ->sh_info contains the section header index of the section to | |
2493 | * which the relocations apply. | |
2494 | */ | |
2495 | if (sechdrs[i].sh_info >= pi->ehdr->e_shnum || | |
2496 | sechdrs[i].sh_link >= pi->ehdr->e_shnum) | |
2497 | return -ENOEXEC; | |
2498 | ||
2499 | section = &sechdrs[sechdrs[i].sh_info]; | |
2500 | symtab = &sechdrs[sechdrs[i].sh_link]; | |
2501 | ||
2502 | if (!(section->sh_flags & SHF_ALLOC)) | |
2503 | continue; | |
2504 | ||
2505 | /* | |
2506 | * symtab->sh_link contains the section header index of the | |
2507 | * associated string table. | |
2508 | */ | |
2509 | if (symtab->sh_link >= pi->ehdr->e_shnum) | |
2510 | /* Invalid section number? */ | |
2511 | continue; | |
2512 | ||
2513 | /* | |
edb0ec07 | 2514 | * The respective architecture needs to provide support for applying | |
12db5562 VG |
2515 | * relocations of type SHT_RELA/SHT_REL. |
2516 | */ | |
2517 | if (sechdrs[i].sh_type == SHT_RELA) | |
2518 | ret = arch_kexec_apply_relocations_add(pi->ehdr, | |
2519 | sechdrs, i); | |
2520 | else if (sechdrs[i].sh_type == SHT_REL) | |
2521 | ret = arch_kexec_apply_relocations(pi->ehdr, | |
2522 | sechdrs, i); | |
2523 | if (ret) | |
2524 | return ret; | |
2525 | } | |
2526 | ||
2527 | return 0; | |
2528 | } | |
2529 | ||
2530 | /* Load relocatable purgatory object and relocate it appropriately */ | |
2531 | int kexec_load_purgatory(struct kimage *image, unsigned long min, | |
2532 | unsigned long max, int top_down, | |
2533 | unsigned long *load_addr) | |
2534 | { | |
2535 | struct purgatory_info *pi = &image->purgatory_info; | |
2536 | int ret; | |
2537 | ||
2538 | if (kexec_purgatory_size <= 0) | |
2539 | return -EINVAL; | |
2540 | ||
2541 | if (kexec_purgatory_size < sizeof(Elf_Ehdr)) | |
2542 | return -ENOEXEC; | |
2543 | ||
2544 | pi->ehdr = (Elf_Ehdr *)kexec_purgatory; | |
2545 | ||
2546 | if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0 | |
2547 | || pi->ehdr->e_type != ET_REL | |
2548 | || !elf_check_arch(pi->ehdr) | |
2549 | || pi->ehdr->e_shentsize != sizeof(Elf_Shdr)) | |
2550 | return -ENOEXEC; | |
2551 | ||
2552 | if (pi->ehdr->e_shoff >= kexec_purgatory_size | |
2553 | || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) > | |
2554 | kexec_purgatory_size - pi->ehdr->e_shoff)) | |
2555 | return -ENOEXEC; | |
2556 | ||
2557 | ret = __kexec_load_purgatory(image, min, max, top_down); | |
2558 | if (ret) | |
2559 | return ret; | |
2560 | ||
2561 | ret = kexec_apply_relocations(image); | |
2562 | if (ret) | |
2563 | goto out; | |
2564 | ||
2565 | *load_addr = pi->purgatory_load_addr; | |
2566 | return 0; | |
2567 | out: | |
2568 | vfree(pi->sechdrs); | |
2569 | vfree(pi->purgatory_buf); | |
2570 | return ret; | |
2571 | } | |
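/*
 * A sketch of the expected calling sequence in an architecture loader
 * (identifiers illustrative):
 *
 *	unsigned long purgatory_load_addr;
 *
 *	ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR,
 *				   ULONG_MAX, 1, &purgatory_load_addr);
 *	if (ret)
 *		return ret;
 *
 * after which the loader patches boot parameters into the purgatory
 * copy with kexec_purgatory_get_set_symbol() before the segments are
 * finally loaded.
 */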
2572 | ||
2573 | static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi, | |
2574 | const char *name) | |
2575 | { | |
2576 | Elf_Sym *syms; | |
2577 | Elf_Shdr *sechdrs; | |
2578 | Elf_Ehdr *ehdr; | |
2579 | int i, k; | |
2580 | const char *strtab; | |
2581 | ||
2582 | if (!pi->sechdrs || !pi->ehdr) | |
2583 | return NULL; | |
2584 | ||
2585 | sechdrs = pi->sechdrs; | |
2586 | ehdr = pi->ehdr; | |
2587 | ||
2588 | for (i = 0; i < ehdr->e_shnum; i++) { | |
2589 | if (sechdrs[i].sh_type != SHT_SYMTAB) | |
2590 | continue; | |
2591 | ||
2592 | if (sechdrs[i].sh_link >= ehdr->e_shnum) | |
2593 | /* Invalid strtab section number */ | |
2594 | continue; | |
2595 | strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset; | |
2596 | syms = (Elf_Sym *)sechdrs[i].sh_offset; | |
2597 | ||
2598 | /* Go through symbols for a match */ | |
2599 | for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) { | |
2600 | if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL) | |
2601 | continue; | |
2602 | ||
2603 | if (strcmp(strtab + syms[k].st_name, name) != 0) | |
2604 | continue; | |
2605 | ||
2606 | if (syms[k].st_shndx == SHN_UNDEF || | |
2607 | syms[k].st_shndx >= ehdr->e_shnum) { | |
2608 | pr_debug("Symbol: %s has bad section index %d.\n", | |
2609 | name, syms[k].st_shndx); | |
2610 | return NULL; | |
2611 | } | |
2612 | ||
2613 | /* Found the symbol we are looking for */ | |
2614 | return &syms[k]; | |
2615 | } | |
2616 | } | |
2617 | ||
2618 | return NULL; | |
2619 | } | |
2620 | ||
2621 | void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name) | |
2622 | { | |
2623 | struct purgatory_info *pi = &image->purgatory_info; | |
2624 | Elf_Sym *sym; | |
2625 | Elf_Shdr *sechdr; | |
2626 | ||
2627 | sym = kexec_purgatory_find_symbol(pi, name); | |
2628 | if (!sym) | |
2629 | return ERR_PTR(-EINVAL); | |
2630 | ||
2631 | sechdr = &pi->sechdrs[sym->st_shndx]; | |
2632 | ||
2633 | /* | |
2634 | * Returns the address where the symbol will finally be loaded after | |
2635 | * kexec_load_segment() | |
2636 | */ | |
2637 | return (void *)(sechdr->sh_addr + sym->st_value); | |
2638 | } | |
2639 | ||
2640 | /* | |
2641 | * Get or set value of a symbol. If "get_value" is true, symbol value is | |
2642 | * returned in buf otherwise symbol value is set based on value in buf. | |
2643 | */ | |
2644 | int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, | |
2645 | void *buf, unsigned int size, bool get_value) | |
2646 | { | |
2647 | Elf_Sym *sym; | |
2648 | Elf_Shdr *sechdrs; | |
2649 | struct purgatory_info *pi = &image->purgatory_info; | |
2650 | char *sym_buf; | |
2651 | ||
2652 | sym = kexec_purgatory_find_symbol(pi, name); | |
2653 | if (!sym) | |
2654 | return -EINVAL; | |
2655 | ||
2656 | if (sym->st_size != size) { | |
2657 | pr_err("symbol %s size mismatch: expected %lu actual %u\n", | |
2658 | name, (unsigned long)sym->st_size, size); | |
2659 | return -EINVAL; | |
2660 | } | |
2661 | ||
2662 | sechdrs = pi->sechdrs; | |
2663 | ||
2664 | if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { | |
2665 | pr_err("symbol %s is in a bss section. Cannot %s\n", name, | |
2666 | get_value ? "get" : "set"); | |
2667 | return -EINVAL; | |
2668 | } | |
2669 | ||
2670 | sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset + | |
2671 | sym->st_value; | |
2672 | ||
2673 | if (get_value) | |
2674 | memcpy((void *)buf, sym_buf, size); | |
2675 | else | |
2676 | memcpy((void *)sym_buf, buf, size); | |
2677 | ||
2678 | return 0; | |
2679 | } | |
74ca317c | 2680 | #endif /* CONFIG_KEXEC_FILE */ |
cb105258 | 2681 | |
7ade3fcc HY |
2682 | /* |
2683 | * Move into place and start executing a preloaded standalone | |
2684 | * executable. If nothing was preloaded, return an error. | |
3ab83521 HY |
2685 | */ |
2686 | int kernel_kexec(void) | |
2687 | { | |
2688 | int error = 0; | |
2689 | ||
8c5a1cf0 | 2690 | if (!mutex_trylock(&kexec_mutex)) |
3ab83521 HY |
2691 | return -EBUSY; |
2692 | if (!kexec_image) { | |
2693 | error = -EINVAL; | |
2694 | goto Unlock; | |
2695 | } | |
2696 | ||
3ab83521 | 2697 | #ifdef CONFIG_KEXEC_JUMP |
7ade3fcc | 2698 | if (kexec_image->preserve_context) { |
bcda53fa | 2699 | lock_system_sleep(); |
89081d17 HY |
2700 | pm_prepare_console(); |
2701 | error = freeze_processes(); | |
2702 | if (error) { | |
2703 | error = -EBUSY; | |
2704 | goto Restore_console; | |
2705 | } | |
2706 | suspend_console(); | |
d1616302 | 2707 | error = dpm_suspend_start(PMSG_FREEZE); |
89081d17 HY |
2708 | if (error) |
2709 | goto Resume_console; | |
d1616302 | 2710 | /* At this point, dpm_suspend_start() has been called, |
cf579dfb RW |
2711 | * but *not* dpm_suspend_end(). We *must* call |
2712 | * dpm_suspend_end() now. Otherwise, drivers for | |
89081d17 HY |
2713 | * some devices (e.g. interrupt controllers) become |
2714 | * desynchronized with the actual state of the | |
2715 | * hardware at resume time, and evil weirdness ensues. | |
2716 | */ | |
cf579dfb | 2717 | error = dpm_suspend_end(PMSG_FREEZE); |
89081d17 | 2718 | if (error) |
749b0afc RW |
2719 | goto Resume_devices; |
2720 | error = disable_nonboot_cpus(); | |
2721 | if (error) | |
2722 | goto Enable_cpus; | |
2ed8d2b3 | 2723 | local_irq_disable(); |
2e711c04 | 2724 | error = syscore_suspend(); |
770824bd | 2725 | if (error) |
749b0afc | 2726 | goto Enable_irqs; |
7ade3fcc | 2727 | } else |
3ab83521 | 2728 | #endif |
7ade3fcc | 2729 | { |
4fc9bbf9 | 2730 | kexec_in_progress = true; |
ca195b7f | 2731 | kernel_restart_prepare(NULL); |
c97102ba | 2732 | migrate_to_reboot_cpu(); |
011e4b02 SB |
2733 | |
2734 | /* | |
2735 | * migrate_to_reboot_cpu() disables CPU hotplug assuming that | |
2736 | * no further code needs to use CPU hotplug (which is true in | |
2737 | * the reboot case). However, the kexec path depends on using | |
2738 | * CPU hotplug again; so re-enable it here. | |
2739 | */ | |
2740 | cpu_hotplug_enable(); | |
e1bebcf4 | 2741 | pr_emerg("Starting new kernel\n"); |
3ab83521 HY |
2742 | machine_shutdown(); |
2743 | } | |
2744 | ||
2745 | machine_kexec(kexec_image); | |
2746 | ||
3ab83521 | 2747 | #ifdef CONFIG_KEXEC_JUMP |
7ade3fcc | 2748 | if (kexec_image->preserve_context) { |
19234c08 | 2749 | syscore_resume(); |
749b0afc | 2750 | Enable_irqs: |
3ab83521 | 2751 | local_irq_enable(); |
749b0afc | 2752 | Enable_cpus: |
89081d17 | 2753 | enable_nonboot_cpus(); |
cf579dfb | 2754 | dpm_resume_start(PMSG_RESTORE); |
89081d17 | 2755 | Resume_devices: |
d1616302 | 2756 | dpm_resume_end(PMSG_RESTORE); |
89081d17 HY |
2757 | Resume_console: |
2758 | resume_console(); | |
2759 | thaw_processes(); | |
2760 | Restore_console: | |
2761 | pm_restore_console(); | |
bcda53fa | 2762 | unlock_system_sleep(); |
3ab83521 | 2763 | } |
7ade3fcc | 2764 | #endif |
3ab83521 HY |
2765 | |
2766 | Unlock: | |
8c5a1cf0 | 2767 | mutex_unlock(&kexec_mutex); |
3ab83521 HY |
2768 | return error; |
2769 | } |
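/*
 * kernel_kexec() is reached via reboot(LINUX_REBOOT_CMD_KEXEC) in
 * kernel/reboot.c; this is what "kexec -e" issues once an image has
 * been loaded with kexec_load(2) or kexec_file_load(2).
 */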