path: root/arch/powerpc/kernel/machine_kexec_64.c
author	Anton Blanchard <anton@au1.ibm.com>	2013-05-12 11:04:53 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-05-14 00:36:23 -0400
commit	79c66ce8f6448a3295a32efeac88c9debd7f7094 (patch)
tree	2c3939dfec9ee8f3eeb8fb10d57252a364bf501e /arch/powerpc/kernel/machine_kexec_64.c
parent	83d5e64b7efa7f39b10ff5e92792e807a720289c (diff)
powerpc/kexec: Fix kexec when using VMX optimised memcpy
commit b3f271e86e5a (powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch) uses VMX when it is safe to do so (ie not in interrupt). It also looks at the task struct to decide if we have to save the current task's VMX state.

kexec calls memcpy() at a point where the task struct may have been overwritten by the new kexec segments. If it has been overwritten, then when memcpy -> enable_altivec looks up current->thread.regs->msr we get a cryptic oops or lockup.

I also noticed we aren't initialising thread_info->cpu, which means smp_processor_id() is broken. Fix that too.

Signed-off-by: Anton Blanchard <anton@samba.org>
Cc: <stable@vger.kernel.org> # 3.6+
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
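For context, the VMX-accelerated memcpy() only takes the vector path when a guard says it is safe. A minimal sketch of that guard, modelled on enter_vmx_copy() in arch/powerpc/lib/vmx-helper.c (an approximation, not the verbatim kernel code), shows why seeding preempt_count avoids the problem:

/* Sketch of the guard on the VMX copy path; modelled on
 * enter_vmx_copy(), not verbatim.
 */
int enter_vmx_copy(void)
{
	if (in_interrupt())
		return 0;	/* caller falls back to the scalar copy */

	preempt_disable();

	/* enable_kernel_altivec() may save the current task's VMX state
	 * via current->thread.regs->msr -- unsafe once the task struct
	 * has been overwritten by the new kexec segments.
	 */
	enable_kernel_altivec();

	return 1;		/* VMX copy is allowed */
}

With kexec_stack.thread_info.preempt_count seeded with HARDIRQ_OFFSET, in_interrupt() is non-zero, the guard returns 0, and memcpy() never dereferences the possibly clobbered task struct.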
Diffstat (limited to 'arch/powerpc/kernel/machine_kexec_64.c')
-rw-r--r--	arch/powerpc/kernel/machine_kexec_64.c	4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 466a2908bb63..611acdf30096 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/cpu.h>
+#include <linux/hardirq.h>
 
 #include <asm/page.h>
 #include <asm/current.h>
@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
 	pr_debug("kexec: Starting switchover sequence.\n");
 
 	/* switch to a staticly allocated stack. Based on irq stack code.
+	 * We setup preempt_count to avoid using VMX in memcpy.
 	 * XXX: the task struct will likely be invalid once we do the copy!
 	 */
 	kexec_stack.thread_info.task = current_thread_info()->task;
 	kexec_stack.thread_info.flags = 0;
+	kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
+	kexec_stack.thread_info.cpu = current_thread_info()->cpu;
 
 	/* We need a static PACA, too; copy this CPU's PACA over and switch to
 	 * it. Also poison per_cpu_offset to catch anyone using non-static
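Why HARDIRQ_OFFSET in particular: in_interrupt() is derived from preempt_count. Simplified from include/linux/hardirq.h of kernels of that era (an approximation; the real header derives the shifts from PREEMPT_BITS/SOFTIRQ_BITS and defines the masks accordingly):

/* preempt_count layout, simplified: preempt bits 0-7, softirq bits 8-15,
 * hardirq bits from 16 up.
 */
#define HARDIRQ_SHIFT	16
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)	/* 0x10000 */

/* non-zero whenever any hardirq, softirq or NMI bits are set */
#define in_interrupt()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK))

Seeding the new stack's preempt_count with HARDIRQ_OFFSET therefore makes the kernel behave as if it were one level deep in hard interrupt context for the rest of the switchover, which is exactly the condition under which the VMX memcpy refuses to run.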