author		Magnus Damm <damm@igel.co.jp>		2009-03-18 04:51:29 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-03-18 05:57:43 -0400
commit		b7cf6ddc13186f9272438a97aa75972d496d0b0a (patch)
tree		f0b7e65b92a573dd10fc97b9bad0d832ed45f64b /arch/sh
parent		e4e063d0c288bd65c56dd855337780a541ed928d (diff)
sh: add kexec jump support
Add kexec jump support to the SuperH architecture.

This is similar to the x86 implementation, with the following
exceptions:

- Instead of separating the assembly code flow into two parts
  for regular kexec and kexec jump, we use a single code path.
  In the assembly snippet, regular kexec is simply a kexec jump
  that never comes back (see the C sketch below).

- Instead of using a swap page when moving data between pages,
  the page copy assembly routine has been modified to exchange
  the data between the pages using registers.

- We walk the page list twice in machine_kexec() to do and undo
  the physical-to-virtual address conversion.
Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
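
The control flow added to machine_kexec() below can be modelled by the
following standalone C sketch. The function names mirror the kernel ones,
but all bodies here are illustrative stand-ins, not kernel code:

/*
 * Toy model of the new machine_kexec() flow: one code path for both
 * plain kexec and kexec jump.
 */
#include <stdio.h>
#include <stdbool.h>

static void save_processor_state(void)    { puts("save CPU state"); }
static void restore_processor_state(void) { puts("restore CPU state"); }

/* for plain kexec this never returns; for kexec jump it comes back
 * once the kexec'ed kernel jumps back to the original one */
static void relocate_new_kernel(void)
{
	puts("swap pages, run the other kernel, swap pages back");
}

static void machine_kexec_model(bool preserve_context)
{
	if (preserve_context)
		save_processor_state();

	relocate_new_kernel();

	if (preserve_context)
		restore_processor_state();
	/* ...followed by converting the page list back to physical
	 * addresses, as done by the loop in machine_kexec() below */
}

int main(void)
{
	machine_kexec_model(true);
	return 0;
}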
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Kconfig                  |   7
-rw-r--r--  arch/sh/kernel/machine_kexec.c   |  32
-rw-r--r--  arch/sh/kernel/relocate_kernel.S | 195
3 files changed, 202 insertions(+), 32 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 6c56495fd158..8d50d527c595 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -559,6 +559,13 @@ config CRASH_DUMP
 
 	  For more details see Documentation/kdump/kdump.txt
 
+config KEXEC_JUMP
+	bool "kexec jump (EXPERIMENTAL)"
+	depends on SUPERH32 && KEXEC && HIBERNATION && EXPERIMENTAL
+	help
+	  Jump between original kernel and kexeced kernel and invoke
+	  code via KEXEC
+
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 25b4748fdc7b..c44efb73ab1a 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -14,20 +14,21 @@
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/numa.h>
+#include <linux/suspend.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 
-typedef NORET_TYPE void (*relocate_new_kernel_t)(
-			unsigned long indirection_page,
-			unsigned long reboot_code_buffer,
-			unsigned long start_address) ATTRIB_NORET;
+typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
+				      unsigned long reboot_code_buffer,
+				      unsigned long start_address);
 
 extern const unsigned char relocate_new_kernel[];
 extern const unsigned int relocate_new_kernel_size;
 extern void *gdb_vbr_vector;
+extern void *vbr_base;
 
 void machine_shutdown(void)
 {
@@ -72,7 +73,6 @@ static void kexec_info(struct kimage *image)
  */
 void machine_kexec(struct kimage *image)
 {
-
 	unsigned long page_list;
 	unsigned long reboot_code_buffer;
 	relocate_new_kernel_t rnk;
@@ -92,6 +92,11 @@ void machine_kexec(struct kimage *image)
 		*ptr = (unsigned long) phys_to_virt(*ptr);
 	}
 
+#ifdef CONFIG_KEXEC_JUMP
+	if (image->preserve_context)
+		save_processor_state();
+#endif
+
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
 
@@ -117,6 +122,23 @@ void machine_kexec(struct kimage *image)
 	/* now call it */
 	rnk = (relocate_new_kernel_t) reboot_code_buffer;
 	(*rnk)(page_list, reboot_code_buffer, image->start);
+
+#ifdef CONFIG_KEXEC_JUMP
+	asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
+	local_irq_disable();
+	clear_bl_bit();
+	if (image->preserve_context)
+		restore_processor_state();
+
+	/* Convert page list back to physical addresses, what a mess. */
+	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+	     ptr = (*ptr & IND_INDIRECTION) ?
+	       phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
+		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+		    *ptr & IND_DESTINATION)
+			*ptr = virt_to_phys(*ptr);
+	}
+#endif
 }
 
 void arch_crash_save_vmcoreinfo(void)
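
The two page-list walks rely on the kimage entry encoding, where the low
bits of each entry tag it as a destination, source, indirection, or done
entry (the same 0x1/0x2/0x4/0x8 bits tested by the assembly below). A
minimal userspace sketch of that double conversion follows; the IND_*
values match include/linux/kexec.h, while the fixed-offset
phys_to_virt()/virt_to_phys() and the single flat list (no indirection
pages) are simplifications for illustration only:

#include <stdio.h>

#define IND_DESTINATION	0x1
#define IND_INDIRECTION	0x2
#define IND_DONE	0x4
#define IND_SOURCE	0x8

#define OFFSET 0x80000000UL	/* stand-in for the kernel's virtual offset */

static unsigned long phys_to_virt(unsigned long p) { return p + OFFSET; }
static unsigned long virt_to_phys(unsigned long v) { return v - OFFSET; }

/* a flat stand-in for image->head: destination page, source page, done */
static unsigned long head[] = {
	0x00001000UL | IND_DESTINATION,
	0x00002000UL | IND_SOURCE,
	IND_DONE,
};

/* one walk: rewrite every tagged address, stop at IND_DONE */
static void convert_list(unsigned long *ptr,
			 unsigned long (*conv)(unsigned long))
{
	unsigned long entry;

	for (; (entry = *ptr) && !(entry & IND_DONE); ptr++)
		if (entry & (IND_SOURCE | IND_INDIRECTION | IND_DESTINATION))
			*ptr = conv(entry);	/* page offset keeps the tag bits */
}

int main(void)
{
	convert_list(head, phys_to_virt);	/* before calling the stub */
	printf("virtual:  %#lx %#lx\n", head[0], head[1]);
	convert_list(head, virt_to_phys);	/* after jumping back */
	printf("physical: %#lx %#lx\n", head[0], head[1]);
	return 0;
}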
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S
index 2a6630be668c..fcc9934fb97b 100644
--- a/arch/sh/kernel/relocate_kernel.S
+++ b/arch/sh/kernel/relocate_kernel.S
@@ -4,6 +4,8 @@
  *
  * LANDISK/sh4 is supported. Maybe, SH archtecture works well.
  *
+ * 2009-03-18 Magnus Damm - Added Kexec Jump support
+ *
  * This source code is licensed under the GNU General Public License,
  * Version 2. See the file COPYING for more details.
  */
@@ -17,14 +19,135 @@ relocate_new_kernel:
 	/* r5 = reboot_code_buffer */
 	/* r6 = start_address */
 
-	mov.l	10f,r8	/* PAGE_SIZE */
+	mov.l	10f, r0		/* PAGE_SIZE */
+	add	r5, r0		/* setup new stack at end of control page */
+
+	/* save r15->r8 to new stack */
+	mov.l	r15, @-r0
+	mov	r0, r15
+	mov.l	r14, @-r15
+	mov.l	r13, @-r15
+	mov.l	r12, @-r15
+	mov.l	r11, @-r15
+	mov.l	r10, @-r15
+	mov.l	r9, @-r15
+	mov.l	r8, @-r15
+
+	/* save other random registers */
+	sts.l	macl, @-r15
+	sts.l	mach, @-r15
+	stc.l	gbr, @-r15
+	stc.l	ssr, @-r15
+	stc.l	sr, @-r15
+	sts.l	pr, @-r15
+	stc.l	spc, @-r15
+
+	/* switch to bank1 and save r7->r0 */
+	mov.l	12f, r9
+	stc	sr, r8
+	or	r9, r8
+	ldc	r8, sr
+	mov.l	r7, @-r15
+	mov.l	r6, @-r15
+	mov.l	r5, @-r15
+	mov.l	r4, @-r15
+	mov.l	r3, @-r15
+	mov.l	r2, @-r15
+	mov.l	r1, @-r15
+	mov.l	r0, @-r15
+
+	/* switch to bank0 and save r7->r0 */
+	mov.l	12f, r9
+	not	r9, r9
+	stc	sr, r8
+	and	r9, r8
+	ldc	r8, sr
+	mov.l	r7, @-r15
+	mov.l	r6, @-r15
+	mov.l	r5, @-r15
+	mov.l	r4, @-r15
+	mov.l	r3, @-r15
+	mov.l	r2, @-r15
+	mov.l	r1, @-r15
+	mov.l	r0, @-r15
+
+	mov.l	r4, @-r15	/* save indirection page again */
+
+	bsr	swap_pages	/* swap pages before jumping to new kernel */
+	nop
+
+	mova	11f, r0
+	mov.l	r15, @r0	/* save pointer to stack */
+
+	jsr	@r6		/* hand over control to new kernel */
+	nop
+
+	mov.l	11f, r15	/* get pointer to stack */
+	mov.l	@r15+, r4	/* restore r4 to get indirection page */
 
-	/* stack setting */
-	add	r8,r5
-	mov	r5,r15
+	bsr	swap_pages	/* swap pages back to previous state */
+	nop
 
+	/* make sure bank0 is active and restore r0->r7 */
+	mov.l	12f, r9
+	not	r9, r9
+	stc	sr, r8
+	and	r9, r8
+	ldc	r8, sr
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	mov.l	@r15+, r7
+
+	/* switch to bank1 and restore r0->r7 */
+	mov.l	12f, r9
+	stc	sr, r8
+	or	r9, r8
+	ldc	r8, sr
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	mov.l	@r15+, r7
+
+	/* switch back to bank0 */
+	mov.l	12f, r9
+	not	r9, r9
+	stc	sr, r8
+	and	r9, r8
+	ldc	r8, sr
+
+	/* restore other random registers */
+	ldc.l	@r15+, spc
+	lds.l	@r15+, pr
+	ldc.l	@r15+, sr
+	ldc.l	@r15+, ssr
+	ldc.l	@r15+, gbr
+	lds.l	@r15+, mach
+	lds.l	@r15+, macl
+
+	/* restore r8->r15 */
+	mov.l	@r15+, r8
+	mov.l	@r15+, r9
+	mov.l	@r15+, r10
+	mov.l	@r15+, r11
+	mov.l	@r15+, r12
+	mov.l	@r15+, r13
+	mov.l	@r15+, r14
+	mov.l	@r15+, r15
+	rts
+	nop
+
+swap_pages:
 	bra	1f
 	mov	r4,r0	/* cmd = indirection_page */
 0:
 	mov.l	@r4+,r0	/* cmd = *ind++ */
 
@@ -37,52 +160,70 @@ relocate_new_kernel:
 	tst	#1,r0
 	bt	2f
 	bra	0b
 	mov	r2,r5
 
 2:	/* else if(cmd & IND_INDIRECTION) ind = addr */
 	tst	#2,r0
 	bt	3f
 	bra	0b
 	mov	r2,r4
 
-3:	/* else if(cmd & IND_DONE) goto 6 */
+3:	/* else if(cmd & IND_DONE) return */
 	tst	#4,r0
 	bt	4f
-	bra	6f
+	rts
 	nop
 
 4:	/* else if(cmd & IND_SOURCE) memcpy(dst,addr,PAGE_SIZE) */
 	tst	#8,r0
 	bt	0b
 
-	mov	r8,r3
+	mov.l	10f,r3	/* PAGE_SIZE */
 	shlr2	r3
 	shlr2	r3
 5:
 	dt	r3
-	mov.l	@r2+,r1	/* 16n+0 */
-	mov.l	r1,@r5
-	add	#4,r5
-	mov.l	@r2+,r1	/* 16n+4 */
-	mov.l	r1,@r5
-	add	#4,r5
-	mov.l	@r2+,r1	/* 16n+8 */
-	mov.l	r1,@r5
-	add	#4,r5
-	mov.l	@r2+,r1	/* 16n+12 */
-	mov.l	r1,@r5
-	add	#4,r5
+
+	/* regular kexec just overwrites the destination page
+	 * with the contents of the source page.
+	 * for the kexec jump case we need to swap the contents
+	 * of the pages.
+	 * to keep it simple swap the contents for both cases.
+	 */
+	mov.l	@(0, r2), r8
+	mov.l	@(0, r5), r1
+	mov.l	r8, @(0, r5)
+	mov.l	r1, @(0, r2)
+
+	mov.l	@(4, r2), r8
+	mov.l	@(4, r5), r1
+	mov.l	r8, @(4, r5)
+	mov.l	r1, @(4, r2)
+
+	mov.l	@(8, r2), r8
+	mov.l	@(8, r5), r1
+	mov.l	r8, @(8, r5)
+	mov.l	r1, @(8, r2)
+
+	mov.l	@(12, r2), r8
+	mov.l	@(12, r5), r1
+	mov.l	r8, @(12, r5)
+	mov.l	r1, @(12, r2)
+
+	add	#16,r5
+	add	#16,r2
 	bf	5b
 
 	bra	0b
 	nop
-6:
-	jmp	@r6
-	nop
 
 	.align 2
 10:
 	.long	PAGE_SIZE
+11:
+	.long	0
+12:
+	.long	0x20000000	! RB=1
 
 relocate_new_kernel_end:
 
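
The swap_pages inner loop above no longer copies source to destination;
it exchanges the two pages 16 bytes at a time through registers, so no
separate swap page is needed. A userspace C equivalent of that loop,
assuming the SH PAGE_SIZE of 4096 bytes, might look like this (the rolled
four-word inner loop corresponds to the unrolled @(0..12,rN) exchanges in
the assembly):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096		/* assumed page size */

static void swap_page_contents(uint32_t *dst, uint32_t *src)
{
	unsigned int n;
	int i;

	for (n = PAGE_SIZE >> 4; n; n--) {	/* shlr2 + shlr2, dt r3 */
		for (i = 0; i < 4; i++) {
			uint32_t s = src[i];	/* temporaries stand in */
			uint32_t d = dst[i];	/* for r8 and r1 */

			dst[i] = s;
			src[i] = d;
		}
		src += 4;			/* add #16, r2 */
		dst += 4;			/* add #16, r5 */
	}
}

int main(void)
{
	static uint32_t page_a[PAGE_SIZE / 4], page_b[PAGE_SIZE / 4];

	memset(page_a, 0xaa, sizeof(page_a));
	memset(page_b, 0x55, sizeof(page_b));
	swap_page_contents(page_a, page_b);
	assert(page_a[0] == 0x55555555u && page_b[0] == 0xaaaaaaaau);
	return 0;
}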