author     Magnus Damm <damm@igel.co.jp>      2009-03-18 04:51:29 -0400
committer  Paul Mundt <lethal@linux-sh.org>   2009-03-18 05:57:43 -0400
commit     b7cf6ddc13186f9272438a97aa75972d496d0b0a
tree       f0b7e65b92a573dd10fc97b9bad0d832ed45f64b /arch/sh/kernel/machine_kexec.c
parent     e4e063d0c288bd65c56dd855337780a541ed928d
sh: add kexec jump support
Add kexec jump support to the SuperH architecture. Similar to the
x86 implementation, with the following exceptions:

- Instead of separating the assembly code flow into two parts for
  regular kexec and kexec jump, we use a single code path. In the
  assembly snippet, regular kexec is just kexec jump that never
  comes back.

- Instead of using a swap page when moving data between pages, the
  page copy assembly routine has been modified to exchange the data
  between the pages using registers.

- We walk the page list twice in machine_kexec() to do and undo
  physical to virtual address conversion.

Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
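In C terms, the register-based exchange in the second point boils down
to something like the sketch below. This is illustrative only: the real
routine is SH assembly in arch/sh/kernel/relocate_kernel.S,
exchange_page_words() is a hypothetical name, and PAGE_SIZE is assumed
to come from the usual kernel headers.

	/*
	 * Sketch only: one word at a time is parked in a register while
	 * the two pages trade contents, so no third "swap page" is
	 * needed when kexec jump has to preserve both copies.
	 */
	static void exchange_page_words(unsigned long *a, unsigned long *b)
	{
		unsigned int i;

		for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
			unsigned long tmp = a[i];	/* held in a register */

			a[i] = b[i];
			b[i] = tmp;
		}
	}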
Diffstat (limited to 'arch/sh/kernel/machine_kexec.c')
-rw-r--r--  arch/sh/kernel/machine_kexec.c  32
1 file changed, 27 insertions(+), 5 deletions(-)
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 25b4748fdc7b..c44efb73ab1a 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -14,20 +14,21 @@
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/numa.h>
+#include <linux/suspend.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 
-typedef NORET_TYPE void (*relocate_new_kernel_t)(
-				unsigned long indirection_page,
-				unsigned long reboot_code_buffer,
-				unsigned long start_address) ATTRIB_NORET;
+typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
+				      unsigned long reboot_code_buffer,
+				      unsigned long start_address);
 
 extern const unsigned char relocate_new_kernel[];
 extern const unsigned int relocate_new_kernel_size;
 extern void *gdb_vbr_vector;
+extern void *vbr_base;
 
 void machine_shutdown(void)
 {
@@ -72,7 +73,6 @@ static void kexec_info(struct kimage *image)
  */
 void machine_kexec(struct kimage *image)
 {
-
 	unsigned long page_list;
 	unsigned long reboot_code_buffer;
 	relocate_new_kernel_t rnk;
@@ -92,6 +92,11 @@ void machine_kexec(struct kimage *image)
 		*ptr = (unsigned long) phys_to_virt(*ptr);
 	}
 
+#ifdef CONFIG_KEXEC_JUMP
+	if (image->preserve_context)
+		save_processor_state();
+#endif
+
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
 
@@ -117,6 +122,23 @@ void machine_kexec(struct kimage *image)
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
 	(*rnk)(page_list, reboot_code_buffer, image->start);
+
+#ifdef CONFIG_KEXEC_JUMP
+	asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
+	local_irq_disable();
+	clear_bl_bit();
+	if (image->preserve_context)
+		restore_processor_state();
+
+	/* Convert page list back to physical addresses, what a mess. */
+	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+	     ptr = (*ptr & IND_INDIRECTION) ?
+	       phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
+		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+		    *ptr & IND_DESTINATION)
+			*ptr = virt_to_phys(*ptr);
+	}
+#endif
 }
 
 void arch_crash_save_vmcoreinfo(void)
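For readers new to the kexec page list, the walk that the last hunk
undoes has roughly the shape below. This is a sketch only, with a
hypothetical helper name: the actual code open-codes both loops inside
machine_kexec(), and struct kimage plus the IND_* flags come from
<linux/kexec.h>.

	/*
	 * Sketch only: the forward walk that converts the kexec page
	 * list to virtual addresses. IND_SOURCE/IND_DESTINATION entries
	 * carry page addresses, IND_INDIRECTION entries point at the
	 * next page full of entries, and IND_DONE terminates the list.
	 * The next pointer is derived from 'entry', which still holds
	 * the physical value after *ptr has been rewritten; the undo
	 * loop added by this patch reverses the conversion with
	 * virt_to_phys() so the list can be reused after a jump back.
	 */
	static void kexec_list_to_virt(struct kimage *image)
	{
		unsigned long entry;
		unsigned long *ptr = &image->head;

		while ((entry = *ptr) && !(entry & IND_DONE)) {
			if (entry & (IND_SOURCE | IND_INDIRECTION |
				     IND_DESTINATION))
				*ptr = (unsigned long) phys_to_virt(entry);

			ptr = (entry & IND_INDIRECTION) ?
				phys_to_virt(entry & PAGE_MASK) : ptr + 1;
		}
	}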