kernel/kexec_core.c
/*
 * kexec_core.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
#include "kexec_internal.h"
DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in the do_exit() path, each of which
	 * corresponds to one of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial and easy. For
 * others it is still a simple, predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages. As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
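
/*
 * Illustrative sketch (not part of the build): the descriptor list the
 * assembly stub walks is a flat run of kimage_entry_t words, each a
 * page-aligned physical address OR'd with a type flag, roughly:
 *
 *	entry[0] = dest_addr  | IND_DESTINATION;   set where to copy next
 *	entry[1] = src_page_0 | IND_SOURCE;        copy page, dest advances
 *	entry[2] = src_page_1 | IND_SOURCE;
 *	entry[3] = next_page  | IND_INDIRECTION;   list continues elsewhere
 *	...
 *	entry[n] = IND_DONE;                       end of list
 *
 * See kimage_add_entry() below for how this list is actually built.
 */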

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;

	/*
	 * Verify we have good destination addresses. The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM. This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned. Too many
	 * special cases crop up when we don't do this. The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments overlap? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes. This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > totalram_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses. Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM. But crash kernels are preloaded into a
	 * reserved area of RAM. We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}
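
/*
 * Sketch (illustrative addresses): with one segment covering
 * [0x100000, 0x104000), a query for [0x103000, 0x105000) returns 1,
 * while [0x104000, 0x106000) returns 0 -- the test treats 'end' as
 * exclusive, so ranges that merely touch do not count as overlapping.
 */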

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address. Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel. All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	return pages;
}
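
/*
 * Alignment sketch (illustrative numbers, 4K pages): for order 1 the
 * hole size is 0x2000, so a control_page of 0x1000 rounds up to
 * hole_start = 0x2000 and hole_end = 0x3fff; the mask arithmetic keeps
 * every candidate hole naturally aligned to its own size.
 */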

struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
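
/*
 * Usage sketch: this walks every entry, transparently hopping across
 * IND_INDIRECTION pages and stopping at IND_DONE, e.g.:
 *
 *	for_each_kimage_entry(image, ptr, entry)
 *		if (entry & IND_SOURCE)
 *			nr_source_pages++;
 */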

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated. This might hit if an
	 * error occurred long after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time. If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone else's destination page.
		 * See if there is already a source page for this
		 * destination page. And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

/*
 * No panic_cpu check version of crash_kexec(). This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient. But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic(). Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other. To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		printk_nmi_flush_on_panic();
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}
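
/*
 * Sketch (illustrative addresses): shrinking a 256M reservation based
 * at 0x10000000 to new_size = 128M computes end = 0x18000000, hands
 * [0x18000000, old crashk_res.end] back as "System RAM", and trims
 * crashk_res.end to 0x17ffffff -- assuming KEXEC_CRASH_MEM_ALIGN
 * divides both values.
 */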

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
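
/*
 * Layout sketch: each memcpy advances buf in whole 4-byte words, so a
 * note named "CORE" (n_namesz = 5) with an 8-byte payload occupies
 *
 *	3 words header | 2 words "CORE\0" + pad | 2 words data
 *
 * which is the standard ELF note format that dump consumers (e.g.
 * crash, makedumpfile) expect.
 */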

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away. ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous
	 * vmalloc pages are also on 2 contiguous physical pages. In that
	 * case the 2nd part of crash_notes in the 2nd page could be lost,
	 * since only the starting address and size of crash_notes are
	 * exported through sysfs. Here round up the size of crash_notes
	 * to the nearest power of two and pass it to __alloc_percpu as
	 * the align value. This makes sure crash_notes is allocated
	 * inside one physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
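
	/*
	 * Sketch: if note_buf_t were, say, 424 bytes, align would round
	 * up to 512, so each per-cpu buffer starts on a 512-byte boundary
	 * and cannot straddle a page. (424 is an illustrative size, not
	 * the real one.)
	 */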

	/*
	 * Break the build if size is bigger than PAGE_SIZE, since
	 * crash_notes would then definitely span 2 pages.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);

/*
 * Parsing the "crashkernel" command line
 *
 * this code is intended to be called from architecture specific code
 */

/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
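/*
 * An illustrative example (mirroring Documentation/kernel-parameters.txt):
 *
 *	crashkernel=512M-2G:64M,2G-:128M
 *
 * reserves 64M when system RAM falls in [512M, 2G) and 128M when it is
 * 2G or more.
 */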
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warn("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("crashkernel: Memory value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warn("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warn("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warn("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("Memory value expected after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
		return -EINVAL;
	}

	return 0;
}

#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
	[SUFFIX_HIGH] = ",high",
	[SUFFIX_LOW]  = ",low",
	[SUFFIX_NULL] = NULL,
};

/*
 * This function parses "suffix" crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_suffix(char *cmdline,
					   unsigned long long *crash_size,
					   const char *suffix)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	/* check with suffix */
	if (strncmp(cur, suffix, strlen(suffix))) {
		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
		return -EINVAL;
	}
	cur += strlen(suffix);
	if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
		return -EINVAL;
	}

	return 0;
}
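
/*
 * For example (illustrative value), "crashkernel=256M,high" parsed
 * with suffix ",high" yields *crash_size = 256M and returns 0.
 */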

static __init char *get_last_crashkernel(char *cmdline,
					 const char *name,
					 const char *suffix)
{
	char *p = cmdline, *ck_cmdline = NULL;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, name);
	while (p) {
		char *end_p = strchr(p, ' ');
		char *q;

		if (!end_p)
			end_p = p + strlen(p);

		if (!suffix) {
			int i;

			/* skip the one with any known suffix */
			for (i = 0; suffix_tbl[i]; i++) {
				q = end_p - strlen(suffix_tbl[i]);
				if (!strncmp(q, suffix_tbl[i],
					     strlen(suffix_tbl[i])))
					goto next;
			}
			ck_cmdline = p;
		} else {
			q = end_p - strlen(suffix);
			if (!strncmp(q, suffix, strlen(suffix)))
				ck_cmdline = p;
		}
next:
		p = strstr(p+1, name);
	}

	if (!ck_cmdline)
		return NULL;

	return ck_cmdline;
}
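
/*
 * Sketch: given cmdline "crashkernel=64M crashkernel=128M" and a NULL
 * suffix, this returns a pointer to the second option, so the last
 * plain crashkernel= wins; options ending in a known suffix such as
 * ",high" or ",low" are skipped unless that suffix was asked for.
 */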

static int __init __parse_crashkernel(char *cmdline,
				      unsigned long long system_ram,
				      unsigned long long *crash_size,
				      unsigned long long *crash_base,
				      const char *name,
				      const char *suffix)
{
	char *first_colon, *first_space;
	char *ck_cmdline;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += strlen(name);

	if (suffix)
		return parse_crashkernel_suffix(ck_cmdline, crash_size,
				suffix);
	/*
	 * if the command line contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);

	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}

/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
					"crashkernel=", NULL);
}

int __init parse_crashkernel_high(char *cmdline,
				  unsigned long long system_ram,
				  unsigned long long *crash_size,
				  unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				"crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
				 unsigned long long system_ram,
				 unsigned long long *crash_size,
				 unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				"crashkernel=", suffix_tbl[SUFFIX_LOW]);
}

static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
	update_vmcoreinfo_note();
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	size_t r;

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}
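
/*
 * Usage sketch: callers append one "KEY=value\n" pair per call, e.g.
 *
 *	vmcoreinfo_append_str("NUMBER(PAGE_SHIFT)=%d\n", PAGE_SHIFT);
 *
 * which is roughly what the VMCOREINFO_* helper macros used below
 * expand to.
 */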

/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

phys_addr_t __weak paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _refcount);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(page, _mapcount);
	VMCOREINFO_OFFSET(page, private);
	VMCOREINFO_OFFSET(page, compound_dtor);
	VMCOREINFO_OFFSET(page, compound_order);
	VMCOREINFO_OFFSET(page, compound_head);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vmap_area, va_start);
	VMCOREINFO_OFFSET(vmap_area, list);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
	VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
#endif
	VMCOREINFO_NUMBER(PG_head_mask);
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_X86
	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
#endif
#ifdef CONFIG_HUGETLB_PAGE
	VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
#endif

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);

/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end(). We *must* call
		 * dpm_suspend_end() now. Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}

/*
 * Protection mechanism for crashkernel reserved memory after
 * the kdump kernel is loaded.
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}