/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>

/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location. To ensure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must lie between the
 * arm64_relocate_new_kernel symbol and the .Lcopy_end label below; the
 * distance between them is exported as arm64_relocate_new_kernel_size. The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
 * control_code_page, a special page which has been set up to be preserved
 * during the copy operation.
 */
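/*
 * On entry (as set up by the machine_kexec() path):
 *   x0 - head of the kimage entry list describing the relocation
 *   x1 - entry point of the new image (kimage_start), branched to at the end
 *
 * Each 64-bit list entry is a page-aligned address carrying IND_* flag bits
 * (IND_DESTINATION, IND_INDIRECTION, IND_SOURCE, IND_DONE, as defined in
 * include/linux/kexec.h) in its low bits; the loop below decodes those flags
 * to walk the indirection pages and copy each source page to its
 * destination.
 */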
ENTRY(arm64_relocate_new_kernel)

	/* Setup the list loop variables. */
	mov	x17, x1				/* x17 = kimage_start */
	mov	x16, x0				/* x16 = kimage_head */
	raw_dcache_line_size x15, x0		/* x15 = dcache line size */
	mov	x14, xzr			/* x14 = entry ptr */
	mov	x13, xzr			/* x13 = copy dest */

	/* Clear the sctlr_el2 flags. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
	ldr	x1, =SCTLR_ELx_FLAGS
	bic	x0, x0, x1
	msr	sctlr_el2, x0
	isb
1:

	/* Check if the new image needs relocation. */
	tbnz	x16, IND_DONE_BIT, .Ldone

.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */

	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Invalidate dest page to PoC. */
	mov	x0, x13
	add	x20, x0, #PAGE_SIZE
	sub	x1, x15, #1
	bic	x0, x0, x1
2:	dc	ivac, x0
	add	x0, x0, x15
	cmp	x0, x20
	b.lo	2b
	dsb	sy

	mov	x20, x13
	mov	x21, x12
	copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7

	/* dest += PAGE_SIZE */
	add	x13, x13, PAGE_SIZE
	b	.Lnext

.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination

	/* ptr = addr */
	mov	x14, x12
	b	.Lnext

.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext

	/* dest = addr */
	mov	x13, x12

.Lnext:
	/* entry = *ptr++ */
	ldr	x16, [x14], #8

	/* while (!(entry & DONE)) */
	tbz	x16, IND_DONE_BIT, .Lloop

.Ldone:
	/*
	 * Wait for the writes from copy_page to finish, then invalidate the
	 * I-cache so that no stale lines from the pages just overwritten are
	 * fetched once we branch into the new image.
	 */
	dsb	nsh
	ic	iallu
	dsb	nsh
	isb

	/* Start new image. */
	mov	x0, xzr
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	br	x17

ENDPROC(arm64_relocate_new_kernel)
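
/*
 * For illustration only: the relocation loop above is roughly equivalent to
 * the following C, where the entry values and IND_* flags come from
 * include/linux/kexec.h, and invalidate_to_poc() and copy_page() stand in
 * for the cache-maintenance and copy_page sequences above:
 *
 *	unsigned long entry, *ptr = NULL, addr, dest = 0;
 *
 *	for (entry = kimage_head; !(entry & IND_DONE); entry = *ptr++) {
 *		addr = entry & PAGE_MASK;
 *
 *		if (entry & IND_SOURCE) {
 *			invalidate_to_poc(dest, PAGE_SIZE);
 *			copy_page(dest, addr);
 *			dest += PAGE_SIZE;
 *		} else if (entry & IND_INDIRECTION) {
 *			ptr = (unsigned long *)addr;	// switch indirection page
 *		} else if (entry & IND_DESTINATION) {
 *			dest = addr;
 *		}
 *	}
 */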

.ltorg

.align 3	/* To keep the 64-bit values below naturally aligned. */

.Lcopy_end:
/*
 * .org cannot move the location counter backwards, so this directive also
 * acts as a build-time check that the code above fits within
 * KEXEC_CONTROL_PAGE_SIZE.
 */
.org	KEXEC_CONTROL_PAGE_SIZE

/*
 * arm64_relocate_new_kernel_size - Number of bytes to copy to the
 * control_code_page.
 */
.globl arm64_relocate_new_kernel_size
arm64_relocate_new_kernel_size:
	.quad	.Lcopy_end - arm64_relocate_new_kernel
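
/*
 * Usage sketch (for illustration only; not a verbatim copy of the
 * machine_kexec() code): the caller copies this routine into the kexec
 * control page and makes the copy visible to the instruction stream before
 * branching to it. Assuming reboot_code_buffer points at the kernel mapping
 * of the control page, that amounts to roughly:
 *
 *	extern const unsigned char arm64_relocate_new_kernel[];
 *	extern const unsigned long arm64_relocate_new_kernel_size;
 *
 *	memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
 *	       arm64_relocate_new_kernel_size);
 *	flush_icache_range((unsigned long)reboot_code_buffer,
 *			   (unsigned long)reboot_code_buffer +
 *			   arm64_relocate_new_kernel_size);
 */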