/*
 * PPC64 code to handle Linux booting another kernel.
 *
 * Copyright (C) 2004-2005, IBM Corp.
 *
 * Created by: Milton D Miller II
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/cpumask.h>
#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/errno.h>

#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
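
/*
 * Illustrative sketch only (an assumption, not part of this file): the
 * default implementation below is expected to be reached through a generic
 * powerpc wrapper that prefers a platform override when one is registered,
 * roughly along these lines:
 */
#if 0
int machine_kexec_prepare(struct kimage *image)
{
	if (ppc_md.machine_kexec_prepare)
		return ppc_md.machine_kexec_prepare(image);
	else
		return default_machine_kexec_prepare(image);
}
#endif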

int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	unsigned long *basep;
	unsigned int *sizep;

	if (!ppc_md.hpte_clear_all)
		return -ENOENT;

	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle the virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;

	/*
	 * For non-LPAR, we absolutely can not overwrite the mmu hash
	 * table, since we are still using the bolted entries in it to
	 * do the copy.  Check that here.
	 *
	 * It is safe if the end is below the start of the blocked
	 * region (end <= low), or if the beginning is after the
	 * end of the blocked region (begin >= high).  Use the
	 * boolean identity !(a || b) === (!a && !b).
	 */
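	/*
	 * Worked example with made-up numbers: if the hash table occupies
	 * [low, high) = [0x01000000, 0x01400000), a segment with
	 * begin = 0x013f0000 and end = 0x01500000 satisfies both
	 * (begin < high) and (end > low), so it overlaps and the load is
	 * rejected; a segment starting at 0x01400000 (begin >= high) is
	 * allowed.
	 */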
	if (htab_address) {
		low = __pa(htab_address);
		high = low + htab_size_bytes;

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	/* We also should not overwrite the tce tables */
	for (node = of_find_node_by_type(NULL, "pci"); node != NULL;
			node = of_find_node_by_type(node, "pci")) {
		basep = (unsigned long *)get_property(node, "linux,tce-base",
							NULL);
		sizep = (unsigned int *)get_property(node, "linux,tce-size",
							NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	return 0;
}

#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
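
/*
 * The image->head list walked by copy_segments() below is the standard kexec
 * indirection list: a sequence of unsigned long entries, each a page-aligned
 * physical address with one of the IND_* flags in its low bits.
 * IND_DESTINATION names the next destination page, IND_INDIRECTION points to
 * the next page of list entries, IND_SOURCE names a source page to copy to
 * the current destination (which then advances by PAGE_SIZE), and IND_DONE
 * terminates the walk.
 */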

static void copy_segments(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr;
	void *dest;
	void *addr;

	/*
	 * We rely on kexec_load to create a list that properly
	 * initializes these pointers before they are used.
	 * We will still crash if the list is wrong, but at least
	 * the compiler will be quiet.
	 */
	ptr = NULL;
	dest = NULL;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		addr = __va(entry & PAGE_MASK);

		switch (entry & IND_FLAGS) {
		case IND_DESTINATION:
			dest = addr;
			break;
		case IND_INDIRECTION:
			ptr = addr;
			break;
		case IND_SOURCE:
			copy_page(dest, addr);
			dest += PAGE_SIZE;
		}
	}
}

void kexec_copy_flush(struct kimage *image)
{
	long i, nr_segments = image->nr_segments;
	struct kexec_segment ranges[KEXEC_SEGMENT_MAX];

	/* save the ranges on the stack to efficiently flush the icache */
	memcpy(ranges, image->segment, sizeof(ranges));

	/*
	 * After this call we may not use anything allocated in dynamic
	 * memory, including *image.
	 *
	 * Only globals and the stack are allowed.
	 */
	copy_segments(image->head);

	/*
	 * we need to clear the icache for all dest pages sometime,
	 * including ones that were in place on the original copy
	 */
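	/*
	 * PowerPC has split instruction and data caches; flushing each
	 * destination range pushes the copied data out of the d-cache and
	 * invalidates the corresponding i-cache lines, so the new kernel's
	 * text is seen by instruction fetch.
	 */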
	for (i = 0; i < nr_segments; i++)
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}

#ifdef CONFIG_SMP

/* FIXME: we should schedule this function to be called on all cpus via
 * an interrupt, but we would like to call it off irq level so that the
 * interrupt controller is left in a clean state.
 */
void kexec_smp_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	local_irq_disable();
	kexec_smp_wait();
	/* NOTREACHED */
}
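
/*
 * The loop below waits for each secondary to advertise that it has parked,
 * which it does by setting its paca hw_cpu_id to -1 (presumably from within
 * the kexec_smp_wait() path entered above); a message is printed the first
 * time we start waiting on a given cpu.
 */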

static void kexec_prepare_cpus(void)
{
	int my_cpu, i, notified = -1;

	smp_call_function(kexec_smp_down, NULL, 0, /* wait */0);
	my_cpu = get_cpu();

	/* check the other cpus are now down (via paca hw cpu id == -1) */
	for (i = 0; i < NR_CPUS; i++) {
		if (i == my_cpu)
			continue;

		while (paca[i].hw_cpu_id != -1) {
			barrier();
			if (!cpu_possible(i)) {
				printk("kexec: cpu %d hw_cpu_id %d is not"
					" possible, ignoring\n",
					i, paca[i].hw_cpu_id);
				break;
			}
			if (!cpu_online(i)) {
				/* FIXME: this can be spinning in
				 * pSeries_secondary_wait with a paca
				 * waiting for it to go online.
				 */
				printk("kexec: cpu %d hw_cpu_id %d is not"
					" online, ignoring\n",
					i, paca[i].hw_cpu_id);
				break;
			}
			if (i != notified) {
				printk("kexec: waiting for cpu %d (physical"
					" %d) to go down\n",
					i, paca[i].hw_cpu_id);
				notified = i;
			}
		}
	}

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	put_cpu();

	local_irq_disable();
}

#else /* ! SMP */

static void kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 *
	 * We need to release the cpus if we are ever going from a
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
}

#endif /* SMP */

/*
 * kexec thread structure and stack.
 *
 * We need to make sure that this is 16384-byte aligned due to the
 * way process stacks are handled.  It also must be statically allocated
 * or allocated as part of the kimage, because everything else may be
 * overwritten when we copy the kexec image.  We piggyback on the
 * "init_task" linker section here to statically allocate a stack.
 *
 * We could use a smaller stack if we don't care about anything using
 * current, but that audit has not been performed.
 */
union thread_union kexec_stack
	__attribute__((__section__(".data.init_task"))) = { };
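
/*
 * Placing kexec_stack in .data.init_task is assumed to give the required
 * 16384-byte (THREAD_SIZE) alignment via the linker script, the same
 * arrangement used for the initial task's stack.
 */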

/* Our assembly helper, in kexec_stub.S */
extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
					void *image, void *control,
					void (*clear_all)(void)) ATTRIB_NORET;

/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	/* prepare control code if any */

	/*
	 * If the kexec boot is the normal one, we need to shut down the
	 * other cpus into our wait loop and quiesce interrupts.
	 * Otherwise, in the case of crashed mode (crashing_cpu >= 0),
	 * stopping the other CPUs and collecting their pt_regs has already
	 * been done, using the debugger IPI.
	 */

	if (crashing_cpu == -1)
		kexec_prepare_cpus();

	/* switch to a statically allocated stack.  Based on irq stack code.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	kexec_stack.thread_info.task = current_thread_info()->task;
	kexec_stack.thread_info.flags = 0;

	/* Some things are best done in assembly.  Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
			page_address(image->control_code_page),
			ppc_md.hpte_clear_all);
	/* NOTREACHED */
}
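
/*
 * In outline (the assembly itself is not part of this file, so this is an
 * assumption based on the arguments passed above): kexec_sequence() switches
 * to kexec_stack, calls kexec_copy_flush() to place and flush the new
 * kernel's pages, invokes the clear_all callback to empty the hash table,
 * disables the MMU and transfers control to the new image at `start'.
 */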

/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base, kernel_end;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = (unsigned char *)&htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = (unsigned char *)&htab_size_bytes,
};

static struct property kernel_end_prop = {
	.name = "linux,kernel-end",
	.length = sizeof(unsigned long),
	.value = (unsigned char *)&kernel_end,
};

static void __init export_htab_values(void)
{
	struct device_node *node;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return;

	kernel_end = __pa(_end);
	prom_add_property(node, &kernel_end_prop);

	/* On machines with no htab htab_address is NULL */
	if (NULL == htab_address)
		goto out;

	htab_base = __pa(htab_address);
	prom_add_property(node, &htab_base_prop);
	prom_add_property(node, &htab_size_prop);

out:
	of_node_put(node);
}
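
/*
 * After export_htab_values() the /chosen node carries, for the next kernel,
 * properties along these lines (values illustrative only):
 *
 *	linux,kernel-end = <0x0 0x00800000>;
 *	linux,htab-base  = <0x0 0x01000000>;
 *	linux,htab-size  = <0x0 0x00400000>;
 *
 * The htab properties are skipped on machines where htab_address is NULL,
 * e.g. when a hypervisor owns the hash table.
 */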

void __init kexec_setup(void)
{
	export_htab_values();
}

int overlaps_crashkernel(unsigned long start, unsigned long size)
{
	return (start + size) > crashk_res.start && start <= crashk_res.end;
}
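
/*
 * crashk_res is a struct resource, whose .end is inclusive, so a range
 * [start, start + size) overlaps the reserved crash kernel region exactly
 * when start <= crashk_res.end and start + size > crashk_res.start, which
 * is what the test above checks.
 */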