Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	1
-rw-r--r--	arch/powerpc/kernel/machine_kexec_64.c	48
-rw-r--r--	arch/powerpc/kernel/misc_64.S	8
-rw-r--r--	arch/powerpc/kernel/paca.c	2
4 files changed, 42 insertions, 17 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0271b58ec31e..1b784ff92d9d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -184,6 +184,7 @@ int main(void)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
 	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+	DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
 	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
 	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
 	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 040bd1de8d99..26f9900f773c 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -155,33 +155,38 @@ void kexec_copy_flush(struct kimage *image)
 
 #ifdef CONFIG_SMP
 
-/* FIXME: we should schedule this function to be called on all cpus based
- * on calling the interrupts, but we would like to call it off irq level
- * so that the interrupt controller is clean.
- */
+static int kexec_all_irq_disabled = 0;
+
 static void kexec_smp_down(void *arg)
 {
+	local_irq_disable();
+	mb(); /* make sure our irqs are disabled before we say they are */
+	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+	while(kexec_all_irq_disabled == 0)
+		cpu_relax();
+	mb(); /* make sure all irqs are disabled before this */
+	/*
+	 * Now every CPU has IRQs off, we can clear out any pending
+	 * IPIs and be sure that no more will come in after this.
+	 */
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 1);
 
-	local_irq_disable();
 	kexec_smp_wait();
 	/* NOTREACHED */
 }
 
-static void kexec_prepare_cpus(void)
+static void kexec_prepare_cpus_wait(int wait_state)
 {
 	int my_cpu, i, notified=-1;
 
-	smp_call_function(kexec_smp_down, NULL, /* wait */0);
 	my_cpu = get_cpu();
-
-	/* check the others cpus are now down (via paca hw cpu id == -1) */
+	/* Make sure each CPU has atleast made it to the state we need */
 	for (i=0; i < NR_CPUS; i++) {
 		if (i == my_cpu)
 			continue;
 
-		while (paca[i].hw_cpu_id != -1) {
+		while (paca[i].kexec_state < wait_state) {
 			barrier();
 			if (!cpu_possible(i)) {
 				printk("kexec: cpu %d hw_cpu_id %d is not"
@@ -201,20 +206,35 @@ static void kexec_prepare_cpus(void)
 			}
 			if (i != notified) {
 				printk( "kexec: waiting for cpu %d (physical"
-						" %d) to go down\n",
-						i, paca[i].hw_cpu_id);
+						" %d) to enter %i state\n",
+						i, paca[i].hw_cpu_id, wait_state);
 				notified = i;
 			}
 		}
 	}
+	mb();
+}
+
+static void kexec_prepare_cpus(void)
+{
+
+	smp_call_function(kexec_smp_down, NULL, /* wait */0);
+	local_irq_disable();
+	mb(); /* make sure IRQs are disabled before we say they are */
+	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+
+	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
+	/* we are sure every CPU has IRQs off at this point */
+	kexec_all_irq_disabled = 1;
 
 	/* after we tell the others to go down */
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 0);
 
-	put_cpu();
+	/* Before removing MMU mapings make sure all CPUs have entered real mode */
+	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
 
-	local_irq_disable();
+	put_cpu();
 }
 
 #else /* ! SMP */
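
The handshake above relies on a new kexec_state byte in the paca and on a small set of KEXEC_STATE_* constants; their definitions live in headers outside the diffstat shown on this page. Below is a minimal sketch of what the code assumes is in place; the exact values and header placement are assumptions here, and the only property the code depends on is the ordering NONE < IRQS_OFF < REAL_MODE, since kexec_prepare_cpus_wait() compares states with '<':

	/* Sketch only: the real definitions are not part of this diffstat. */
	#define KEXEC_STATE_NONE	0	/* boot default, set in initialise_paca() */
	#define KEXEC_STATE_IRQS_OFF	1	/* set once a CPU has hard-disabled interrupts */
	#define KEXEC_STATE_REAL_MODE	2	/* set in kexec_wait once the CPU runs with the MMU off */
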
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index a5cf9c1356a6..a2b18dffa03e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -24,6 +24,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
+#include <asm/kexec.h>
 
 	.text
 
@@ -471,6 +472,10 @@ _GLOBAL(kexec_wait)
 1:	mflr	r5
 	addi	r5,r5,kexec_flag-1b
 
+	li	r4,KEXEC_STATE_REAL_MODE
+	stb	r4,PACAKEXECSTATE(r13)
+	SYNC
+
 99:	HMT_LOW
 #ifdef CONFIG_KEXEC		/* use no memory without kexec */
 	lwz	r4,0(r5)
@@ -494,14 +499,11 @@ kexec_flag:
  * note: this is a terminal routine, it does not save lr
  *
  * get phys id from paca
- * set paca id to -1 to say we got here
  * switch to real mode
  * join other cpus in kexec_wait(phys_id)
  */
 _GLOBAL(kexec_smp_wait)
 	lhz	r3,PACAHWCPUID(r13)
-	li	r4,-1
-	sth	r4,PACAHWCPUID(r13)	/* let others know we left */
 	bl	real_mode
 	b	.kexec_wait
 
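
With this change each secondary advertises that it has reached real mode by writing KEXEC_STATE_REAL_MODE into its own paca and publishing the store with SYNC, replacing the old convention of stamping hw_cpu_id with -1 in kexec_smp_wait. A rough C rendering of the three added instructions, as a sketch only (the real code is the assembly above, running with translation off):

	/* sketch of the added kexec_wait prologue; not the actual implementation */
	get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;  /* li r4,...; stb r4,PACAKEXECSTATE(r13) */
	mb();                                             /* SYNC: publish the store before spinning */
	/* ...then drop to HMT_LOW and spin on kexec_flag as before */
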
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0c40c6f476fe..f88acf0218db 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -18,6 +18,7 @@
 #include <asm/pgtable.h>
 #include <asm/iseries/lpar_map.h>
 #include <asm/iseries/hv_types.h>
+#include <asm/kexec.h>
 
 /* This symbol is provided by the linker - let it fill in the paca
  * field correctly */
@@ -97,6 +98,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 	new_paca->kernelbase = (unsigned long) _stext;
 	new_paca->kernel_msr = MSR_KERNEL;
 	new_paca->hw_cpu_id = 0xffff;
+	new_paca->kexec_state = KEXEC_STATE_NONE;
 	new_paca->__current = &init_task;
 #ifdef CONFIG_PPC_STD_MMU_64
 	new_paca->slb_shadow_ptr = &slb_shadow[cpu];
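
Taken together, the four files implement a two-phase rendezvous: interrupts are known to be off on every CPU before pending IPIs are cleared, and every CPU has entered real mode before MMU mappings are removed. A sketch of the resulting ordering, using only names from the diffs above (the right-hand column runs on every other CPU via the smp_call_function IPI):

	/*
	 * crashing CPU                              other CPUs (kexec_smp_down via IPI)
	 * ------------                              -----------------------------------
	 * smp_call_function(kexec_smp_down)
	 * local_irq_disable()                       local_irq_disable()
	 * kexec_state = KEXEC_STATE_IRQS_OFF        kexec_state = KEXEC_STATE_IRQS_OFF
	 * wait: all kexec_state >= IRQS_OFF         spin until kexec_all_irq_disabled
	 * kexec_all_irq_disabled = 1        ----->  (released; no new IPIs will arrive)
	 * ppc_md.kexec_cpu_down(0, 0)               ppc_md.kexec_cpu_down(0, 1)
	 *                                           kexec_smp_wait() -> real mode,
	 *                                           kexec_state = KEXEC_STATE_REAL_MODE
	 * wait: all kexec_state >= REAL_MODE
	 * put_cpu(); safe to tear down MMU mappings and copy the new kernel
	 */
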