/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *  https://lkml.org/lkml/2010/6/18/4
 *  https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 *  https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * License terms: GNU General Public License (GPL) version 2
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/irqflags.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/suspend.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Find a symbol's alias in the linear map */
#define LMADDR(x)	phys_to_virt(virt_to_phys(x))

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())

/*
 * Start/end of the hibernate exit code, this must be copied to a 'safe'
 * location in memory, and executed from there.
 */
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * configuration.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;
} resume_hdr;

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

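/*
 * Report whether a page frame lies in the kernel's nosave region, so the
 * hibernate core leaves it out of the image.
 */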
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);

	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
}

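/*
 * Hibernate runs with only the boot CPU online, so there is no per-CPU
 * state to stash here; just warn if any secondaries are still up.
 */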
void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= virt_to_phys(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

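/*
 * Reject an image written by a different kernel: the saved uts_version
 * string must match ours before the addresses in the header are trusted.
 */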
int arch_hibernation_header_restore(void *addr)
{
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps it at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr,
				 void *(*allocator)(gfp_t mask),
				 gfp_t mask)
{
	int rc = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long dst = (unsigned long)allocator(mask);

	if (!dst) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy((void *)dst, src_start, length);
	flush_icache_range(dst, dst + length);

	pgd = pgd_offset_raw(allocator(mask), dst_addr);
	if (pgd_none(*pgd)) {
		pud = allocator(mask);
		if (!pud) {
			rc = -ENOMEM;
			goto out;
		}
		pgd_populate(&init_mm, pgd, pud);
	}

	pud = pud_offset(pgd, dst_addr);
	if (pud_none(*pud)) {
		pmd = allocator(mask);
		if (!pmd) {
			rc = -ENOMEM;
			goto out;
		}
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, dst_addr);
	if (pmd_none(*pmd)) {
		pte = allocator(mask);
		if (!pte) {
			rc = -ENOMEM;
			goto out;
		}
		pmd_populate_kernel(&init_mm, pmd, pte);
	}

	pte = pte_offset_kernel(pmd, dst_addr);
	set_pte(pte, __pte(virt_to_phys((void *)dst) |
			 pgprot_val(PAGE_KERNEL_EXEC)));

	/* Load our new page tables */
	asm volatile("msr	ttbr0_el1, %0;"
		     "isb;"
		     "tlbi	vmalle1is;"
		     "dsb	ish;"
		     "isb" : : "r"(virt_to_phys(pgd)));

	*phys_dst_addr = virt_to_phys((void *)dst);

out:
	return rc;
}

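/*
 * __cpu_suspend_enter() saves the CPU context and returns non-zero, at
 * which point the image is written out; when we come back through
 * cpu_resume it returns zero, and we clean up and report the restore.
 */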
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	local_dbg_save(flags);

	if (__cpu_suspend_enter(&state)) {
		ret = swsusp_save();
	} else {
		/* Clean kernel to PoC for secondary core startup */
		__flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START);

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory.
		 */
		in_suspend = 0;

		__cpu_suspend_exit();
	}

	local_dbg_restore(flags);

	return ret;
}

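/*
 * The copy_pte/copy_pmd/copy_pud helpers below duplicate the kernel page
 * tables for the linear map into freshly allocated safe pages, clearing
 * the read-only bits so the restore code can write to pages that are
 * mapped read-only (code, rodata) in the live tables.
 */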
static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
		    unsigned long end)
{
	pte_t *src_pte;
	pte_t *dst_pte;
	unsigned long addr = start;

	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_pte)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
	dst_pte = pte_offset_kernel(dst_pmd, start);

	src_pte = pte_offset_kernel(src_pmd, start);
	do {
		if (!pte_none(*src_pte))
			/*
			 * Resume will overwrite areas that may be marked
			 * read only (code, rodata). Clear the RDONLY bit from
			 * the temporary mappings we use during restore.
			 */
			set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY));
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	return 0;
}

static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmd;
	pmd_t *dst_pmd;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(*dst_pud)) {
		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmd)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pud, dst_pmd);
	}
	dst_pmd = pmd_offset(dst_pud, start);

	src_pmd = pmd_offset(src_pud, start);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*src_pmd))
			continue;
		if (pmd_table(*src_pmd)) {
			if (copy_pte(dst_pmd, src_pmd, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmd,
				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);

	return 0;
}

static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pud;
	pud_t *src_pud;
	unsigned long next;
	unsigned long addr = start;

	if (pgd_none(*dst_pgd)) {
		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pud)
			return -ENOMEM;
		pgd_populate(&init_mm, dst_pgd, dst_pud);
	}
	dst_pud = pud_offset(dst_pgd, start);

	src_pud = pud_offset(src_pgd, start);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*src_pud))
			continue;
		if (pud_table(*(src_pud))) {
			if (copy_pmd(dst_pud, src_pud, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pud,
				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pud++, src_pud++, addr = next, addr != end);

	return 0;
}

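/*
 * Entry point for the copy: walk the top-level entries of the kernel's
 * page tables for [start, end) and populate dst_pgd with duplicates.
 */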
static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgd = pgd_offset_k(start);

	dst_pgd = pgd_offset_raw(dst_pgd, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*src_pgd))
			continue;
		if (copy_pud(dst_pgd, src_pgd, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	return 0;
}

/*
 * Set up then resume from the hibernate image using swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * code; we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc = 0;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	void *lm_restore_pblist;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit,
				   (void *)get_safe_page, GFP_ATOMIC);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		goto out;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!tmp_pg_dir) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		rc = -ENOMEM;
		goto out;
	}
	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
	if (rc)
		goto out;

	/*
	 * Since we only copied the linear map, we need to find restore_pblist's
	 * linear map address.
	 */
	lm_restore_pblist = LMADDR(restore_pblist);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;	/* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, lm_restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

out:
	return rc;
}

static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
					     unsigned long action, void *ptr)
{
	if (action == PM_HIBERNATION_PREPARE &&
	    cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return notifier_from_errno(-ENODEV);
	}

	return NOTIFY_OK;
}

static int __init check_boot_cpu_online_init(void)
{
	/*
	 * Set this pm_notifier callback with a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
	 * called earlier to disable cpu hotplug before the cpu online check.
	 */
	pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);
	return 0;
}
core_initcall(check_boot_cpu_online_init);