/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>

#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
#include "kexec_internal.h"
DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in do_exit() path, each of which
	 * corresponds to each of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}
int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
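
/*
 * Illustrative sketch, not part of the original file: each kimage_entry_t
 * in the descriptor list handed to the assembly stub is a physical address
 * with one of the IND_* type bits or'd into its low bits.  A consumer
 * would walk it roughly like this (dest and ptr are hypothetical locals):
 *
 *	if (entry & IND_DESTINATION)		// set the copy destination
 *		dest = entry & PAGE_MASK;
 *	else if (entry & IND_INDIRECTION)	// chain to next descriptor page
 *		ptr = phys_to_virt(entry & PAGE_MASK);
 *	else if (entry & IND_SOURCE) {		// copy one page, advance dest
 *		copy_page(dest, entry & PAGE_MASK);
 *		dest += PAGE_SIZE;
 *	} else if (entry == IND_DONE)		// end of list
 *		stop;
 */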
static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);
int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}
	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}
	/* Ensure our buffer sizes are no larger than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}
	/*
	 * Verify that no more than half of memory will be consumed.  If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > totalram_pages / 2)
		return -EINVAL;
	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}
struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}
int kimage_is_destination_range(struct kimage *image,
				unsigned long start,
				unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}
void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
		    kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	return pages;
}
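
/*
 * Worked example for the hole-finding allocator above (illustrative):
 * with 4 KiB pages and order = 1, size = 0x2000, so
 * hole_start = (control_page + 0x1fff) & ~0x1fff rounds the search
 * position up to the next 8 KiB boundary, and hole_end = hole_start +
 * 0x1fff addresses the last byte of the candidate hole.
 */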
struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}
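
/*
 * Note (illustrative): image->entry always points at the slot that will
 * receive the next entry and image->last_entry at the final slot of the
 * current indirection page.  That last slot is reserved for the
 * IND_INDIRECTION link to the next page, which is why the two are compared
 * before a new entry is written.
 */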
static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}
static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}
static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}
void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
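
/*
 * Usage sketch (illustrative): the macro walks every entry in an image,
 * following IND_INDIRECTION links transparently and stopping at IND_DONE:
 *
 *	kimage_entry_t *ptr, entry;
 *
 *	for_each_kimage_entry(image, ptr, entry) {
 *		if (entry & IND_SOURCE)
 *			handle_source_page(entry & PAGE_MASK); // hypothetical
 *	}
 */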
static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}
void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might hit if
	 * error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simply to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed up.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}
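
/*
 * Note (illustrative): the swap above keeps the invariant intact -- the
 * freshly allocated page becomes the source page for addr, and the page
 * that previously held that role is handed back to the caller only when
 * its location is compatible with the gfp flags that were passed in.
 */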
static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
int kimage_load_segment(struct kimage *image,
			struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;
/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other.  To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		printk_nmi_flush_on_panic();
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}
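
/*
 * Note (illustrative): atomic_cmpxchg() returns the value panic_cpu held
 * before the exchange, so exactly one CPU observes PANIC_CPU_INVALID and
 * wins the right to run __crash_kexec(); any CPU arriving later reads the
 * winner's id instead and simply falls through.
 */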
size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);

	return size;
}
void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}
int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
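
/*
 * Layout sketch (illustrative): each note is emitted as the elf_note
 * header, then the NUL-terminated name, then the descriptor data, with
 * every part padded to a 4-byte boundary -- hence the (len + 3)/4
 * pointer arithmetic on a buffer of u32 words:
 *
 *	| n_namesz | n_descsz | n_type | name (padded) | desc (padded) |
 */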
static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}
static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based.  vmalloc doesn't guarantee 2 continuous vmalloc
	 * pages are also on 2 continuous physical pages.  In this case the
	 * 2nd part of crash_notes in 2nd page could be lost since only the
	 * starting address and size of crash_notes are exported through sysfs.
	 * Here round up the size of crash_notes to the nearest power of two
	 * and pass it to __alloc_percpu as align value.  This can make sure
	 * crash_notes is allocated inside one physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break compile if size is bigger than PAGE_SIZE since crash_notes
	 * definitely will be in 2 pages with that.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}

	return 0;
}
subsys_initcall(crash_notes_memory_init);
/*
 * Parsing the "crashkernel" commandline.
 *
 * This code is intended to be called from architecture specific code.
 */


/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warn("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("crashkernel: Memory value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warn("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warn("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warn("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("Memory value expected after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
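
/*
 * Example (illustrative): given
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * a machine with 1G of RAM matches the 512M-2G range and reserves 64M, a
 * machine with 4G matches 2G- and reserves 128M, and the trailing @16M
 * pins the base address of the reservation.
 */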
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
		return -EINVAL;
	}

	return 0;
}
#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
	[SUFFIX_HIGH] = ",high",
	[SUFFIX_LOW]  = ",low",
	[SUFFIX_NULL] = NULL,
};
/*
 * This function parses "suffix" crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_suffix(char *cmdline,
					   unsigned long long *crash_size,
					   const char *suffix)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	/* check with suffix */
	if (strncmp(cur, suffix, strlen(suffix))) {
		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
		return -EINVAL;
	}
	cur += strlen(suffix);
	if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
		return -EINVAL;
	}

	return 0;
}
static __init char *get_last_crashkernel(char *cmdline,
					 const char *name,
					 const char *suffix)
{
	char *p = cmdline, *ck_cmdline = NULL;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, name);
	while (p) {
		char *end_p = strchr(p, ' ');
		char *q;

		if (!end_p)
			end_p = p + strlen(p);

		if (!suffix) {
			int i;

			/* skip the one with any known suffix */
			for (i = 0; suffix_tbl[i]; i++) {
				q = end_p - strlen(suffix_tbl[i]);
				if (!strncmp(q, suffix_tbl[i],
					     strlen(suffix_tbl[i])))
					goto next;
			}
			ck_cmdline = p;
		} else {
			q = end_p - strlen(suffix);
			if (!strncmp(q, suffix, strlen(suffix)))
				ck_cmdline = p;
		}
next:
		p = strstr(p+1, name);
	}

	if (!ck_cmdline)
		return NULL;

	return ck_cmdline;
}
static int __init __parse_crashkernel(char *cmdline,
				      unsigned long long system_ram,
				      unsigned long long *crash_size,
				      unsigned long long *crash_base,
				      const char *name,
				      const char *suffix)
{
	char	*first_colon, *first_space;
	char	*ck_cmdline;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += strlen(name);

	if (suffix)
		return parse_crashkernel_suffix(ck_cmdline, crash_size,
						suffix);
	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
					     crash_size, crash_base);

	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", NULL);
}

int __init parse_crashkernel_high(char *cmdline,
				  unsigned long long system_ram,
				  unsigned long long *crash_size,
				  unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
				 unsigned long long system_ram,
				 unsigned long long *crash_size,
				 unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_LOW]);
}
static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}
void crash_save_vmcoreinfo(void)
{
	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
	update_vmcoreinfo_note();
}
void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	size_t r;

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

phys_addr_t __weak paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _refcount);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(page, _mapcount);
	VMCOREINFO_OFFSET(page, private);
	VMCOREINFO_OFFSET(page, compound_dtor);
	VMCOREINFO_OFFSET(page, compound_order);
	VMCOREINFO_OFFSET(page, compound_head);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vmap_area, va_start);
	VMCOREINFO_OFFSET(vmap_area, list);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
	VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
#endif
	VMCOREINFO_NUMBER(PG_head_mask);
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_X86
	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
#endif
#ifdef CONFIG_HUGETLB_PAGE
	VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
#endif

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}
subsys_initcall(crash_save_vmcoreinfo_init);
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case).  However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}
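
/*
 * Note (illustrative): in the CONFIG_KEXEC_JUMP/preserve_context case
 * machine_kexec() may return; execution then falls into the resume ladder
 * above, which unwinds the suspend steps in reverse order through the
 * Enable_irqs/Enable_cpus/... labels.
 */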
/*
 * Protection mechanism for crashkernel reserved memory after
 * the kdump kernel is loaded.
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}