/*
 * PPC64 code to handle Linux booting another kernel.
 *
 * Copyright (C) 2004-2005, IBM Corp.
 *
 * Created by: Milton D Miller II
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	const unsigned long *basep;
	const unsigned int *sizep;

	if (!ppc_md.hpte_clear_all)
		return -ENOENT;
	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;
	/*
	 * For non-LPAR, we absolutely cannot overwrite the mmu hash
	 * table, since we are still using the bolted entries in it to
	 * do the copy.  Check that here.
	 *
	 * It is safe if the end is below the start of the blocked
	 * region (end <= low), or if the beginning is after the
	 * end of the blocked region (begin >= high).  Use the
	 * boolean identity !(a || b) === (!a && !b).
	 */
	if (htab_address) {
		low = __pa(htab_address);
		high = low + htab_size_bytes;

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}
	/* We also should not overwrite the tce tables */
	for (node = of_find_node_by_type(NULL, "pci"); node != NULL;
			node = of_find_node_by_type(node, "pci")) {
		basep = of_get_property(node, "linux,tce-base", NULL);
		sizep = of_get_property(node, "linux,tce-size", NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	return 0;
}
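
/*
 * Illustrative sketch (not in the original file; the helper name is
 * ours): the overlap test used above, factored out.  Two half-open
 * ranges [begin, end) and [low, high) are disjoint iff
 * (end <= low || begin >= high); by !(a || b) === (!a && !b) they
 * overlap iff (begin < high) && (end > low).
 */
static inline bool __maybe_unused ranges_overlap(unsigned long begin,
		unsigned long end, unsigned long low, unsigned long high)
{
	return (begin < high) && (end > low);
}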
#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)

static void copy_segments(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr;
	void *dest;
	void *addr;

	/*
	 * We rely on kexec_load to create lists that properly
	 * initialize these pointers before they are used.
	 * We will still crash if the list is wrong, but at least
	 * the compiler will be quiet.
	 */
	ptr = NULL;
	dest = NULL;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		addr = __va(entry & PAGE_MASK);

		switch (entry & IND_FLAGS) {
		case IND_DESTINATION:
			dest = addr;
			break;
		case IND_INDIRECTION:
			ptr = addr;
			break;
		case IND_SOURCE:
			copy_page(dest, addr);
			dest += PAGE_SIZE;
		}
	}
}
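
/*
 * Illustrative sketch (not in the original file; the helper name is
 * ours): a read-only walk of the same indirection list, here just
 * counting IND_SOURCE entries.  It shows the encoding copy_segments()
 * decodes: each entry is a physical page address with IND_* flags in
 * the low bits, IND_INDIRECTION switches to a new list page, and
 * IND_DONE terminates the walk.
 */
static unsigned long __maybe_unused count_source_pages(unsigned long ind)
{
	unsigned long entry, count = 0;
	unsigned long *ptr = NULL;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		if ((entry & IND_FLAGS) == IND_INDIRECTION)
			ptr = __va(entry & PAGE_MASK);
		else if ((entry & IND_FLAGS) == IND_SOURCE)
			count++;
	}
	return count;
}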
void kexec_copy_flush(struct kimage *image)
{
	long i, nr_segments = image->nr_segments;
	struct kexec_segment ranges[KEXEC_SEGMENT_MAX];

	/* save the ranges on the stack to efficiently flush the icache */
	memcpy(ranges, image->segment, sizeof(ranges));

	/*
	 * After this call we may not use anything allocated in dynamic
	 * memory, including *image.
	 *
	 * Only globals and the stack are allowed.
	 */
	copy_segments(image->head);

	/*
	 * We need to clear the icache for all dest pages at some point,
	 * including ones that were in place on the original copy.
	 */
	for (i = 0; i < nr_segments; i++)
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}
#ifdef CONFIG_SMP

static int kexec_all_irq_disabled = 0;

static void kexec_smp_down(void *arg)
{
	local_irq_disable();
	mb(); /* make sure our irqs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
	while (kexec_all_irq_disabled == 0)
		cpu_relax();

	mb(); /* make sure all irqs are disabled before this */
	/*
	 * Now every CPU has IRQs off, we can clear out any pending
	 * IPIs and be sure that no more will come in after this.
	 */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	kexec_smp_wait();
	/* NOTREACHED */
}
static void kexec_prepare_cpus_wait(int wait_state)
{
	int my_cpu, i, notified = -1;

	my_cpu = get_cpu();
	/* Make sure each CPU has at least made it to the state we need */
	for (i = 0; i < NR_CPUS; i++) {
		if (i == my_cpu)
			continue;

		while (paca[i].kexec_state < wait_state) {
			barrier();
			if (!cpu_possible(i)) {
				printk("kexec: cpu %d hw_cpu_id %d is not"
						" possible, ignoring\n",
						i, paca[i].hw_cpu_id);
				break;
			}
			if (!cpu_online(i)) {
				/*
				 * Fixme: this can be spinning in
				 * pSeries_secondary_wait with a paca
				 * waiting for it to go online.
				 */
				printk("kexec: cpu %d hw_cpu_id %d is not"
						" online, ignoring\n",
						i, paca[i].hw_cpu_id);
				break;
			}
			if (i != notified) {
				printk("kexec: waiting for cpu %d (physical"
						" %d) to enter %i state\n",
						i, paca[i].hw_cpu_id, wait_state);
				notified = i;
			}
		}
	}
	mb();
}
static void kexec_prepare_cpus(void)
{
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/* before removing MMU mappings, make sure all CPUs have entered real mode */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	put_cpu();
}
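
/*
 * Note on the ordering above (our summary, not original text): the
 * shutdown is a two-phase handshake.  Each secondary publishes
 * KEXEC_STATE_IRQS_OFF in its paca and spins on kexec_all_irq_disabled;
 * once the boot CPU has seen every secondary reach that state it sets
 * the flag, releasing them to call ppc_md.kexec_cpu_down() and drop to
 * real mode, where they are awaited a second time at
 * KEXEC_STATE_REAL_MODE before the MMU mappings go away.
 */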
#else /* ! SMP */

static void kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 *
	 * We need to release the cpus if we are ever going from a
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
}

#endif /* SMP */
/*
 * kexec thread structure and stack.
 *
 * We need to make sure that this is 16384-byte aligned due to the
 * way process stacks are handled.  It also must be statically allocated
 * or allocated as part of the kimage, because everything else may be
 * overwritten when we copy the kexec image.  We piggyback on the
 * "init_task" linker section here to statically allocate a stack.
 *
 * We could use a smaller stack if we don't care about anything using
 * current, but that audit has not been performed.
 */
static union thread_union kexec_stack __init_task_data = { };
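
/*
 * Sketch (our addition, not in the original file): a compile-time guard
 * for the size requirement described above.  union thread_union is
 * exactly THREAD_SIZE bytes (16384 on ppc64), which is what lets the
 * init_task linker section keep the stack suitably aligned.
 */
static void __maybe_unused kexec_stack_size_check(void)
{
	BUILD_BUG_ON(sizeof(kexec_stack) != THREAD_SIZE);
}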
/* Our assembly helper, in kexec_stub.S */
extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
					void *image, void *control,
					void (*clear_all)(void)) ATTRIB_NORET;
/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	/* prepare control code if any */

	/*
	 * If this is a normal kexec boot, we need to shut the other
	 * CPUs down into our wait loop and quiesce interrupts.
	 * Otherwise, in the case of crash mode (crashing_cpu >= 0),
	 * stopping the other CPUs and collecting their pt_regs has
	 * already been done via the debugger IPI.
	 */
	if (crashing_cpu == -1)
		kexec_prepare_cpus();

	/*
	 * Switch to a statically allocated stack, based on the irq
	 * stack code.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	kexec_stack.thread_info.task = current_thread_info()->task;
	kexec_stack.thread_info.flags = 0;

	/*
	 * Some things are best done in assembly.  Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
			page_address(image->control_code_page),
			ppc_md.hpte_clear_all);
	/* NOTREACHED */
}
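
/*
 * Note (our summary of the assembly helper, stated as an assumption):
 * kexec_sequence() never returns.  It moves onto kexec_stack, copies
 * the image with kexec_copy_flush() while the bolted hash entries are
 * still usable (which is why default_machine_kexec_prepare() rejects
 * segments overlapping the hash table), then calls clear_all() to tear
 * the hash table down and jumps to the control code in real mode.
 */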
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = &htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = &htab_size_bytes,
};
static int __init export_htab_values(void)
{
	struct device_node *node;
	struct property *prop;

	/* On machines with no htab htab_address is NULL */
	if (!htab_address)
		return -ENODEV;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENODEV;

	/* remove any stale properties so ours can be found */
	prop = of_find_property(node, htab_base_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);
	prop = of_find_property(node, htab_size_prop.name, NULL);
	if (prop)
		prom_remove_property(node, prop);

	htab_base = __pa(htab_address);
	prom_add_property(node, &htab_base_prop);
	prom_add_property(node, &htab_size_prop);

	of_node_put(node);
	return 0;
}
late_initcall(export_htab_values);
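
/*
 * Illustrative sketch (our addition; the function is hypothetical and
 * never registered): how the second kernel could read the values
 * exported above back out of /chosen.
 */
static int __init __maybe_unused read_htab_values(void)
{
	struct device_node *node;
	const unsigned long *basep, *sizep;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENODEV;

	basep = of_get_property(node, "linux,htab-base", NULL);
	sizep = of_get_property(node, "linux,htab-size", NULL);
	if (basep && sizep)
		printk(KERN_INFO "htab at 0x%lx, %lu bytes\n",
				*basep, *sizep);

	of_node_put(node);
	return 0;
}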