author		Michael Neuling <mikey@neuling.org>			2008-07-11 02:29:12 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-07-14 22:29:23 -0400
commit		7c29217096d83f657e6ee70479af09b46f4275f6
tree		96fd96226d2998b1f56d7fb8110e27c489dbba89
parent		01f4b8b8b8db09b88be7df7e51192e4e678b69d3
powerpc: fix giveup_vsx to save registers correctly
giveup_vsx didn't save the FPU and VMX registers. Change it to be
like giveup_fpu/altivec, which save these registers.
Also update call sites where FPU and VMX are already saved to use the
original giveup_vsx (renamed to __giveup_vsx).
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
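
To illustrate the split this patch introduces, here is a minimal userspace sketch of the pattern: a full giveup_vsx() that flushes the overlapping FPU and VMX state before calling the low-level routine, versus __giveup_vsx(), which assumes that has already been done. The struct task_state, its *_live fields, and main() below are hypothetical stand-ins for the kernel's thread_struct and the real giveup_* entry points, not the actual kernel API.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's per-task register state. */
struct task_state {
	int fpu_live;	/* FPU registers still live in hardware? */
	int vmx_live;	/* VMX/AltiVec registers still live?     */
	int vsx_live;	/* VSX registers still live?             */
};

/* Models of giveup_fpu()/giveup_altivec(): save and disable that unit. */
static void giveup_fpu(struct task_state *t)     { t->fpu_live = 0; }
static void giveup_altivec(struct task_state *t) { t->vmx_live = 0; }

/*
 * Model of __giveup_vsx(): disables VSX only and deliberately does NOT
 * touch the FPU/VMX halves -- callers must have saved those already.
 */
static void __giveup_vsx(struct task_state *t)   { t->vsx_live = 0; }

/*
 * Model of the giveup_vsx() wrapper added by this patch: because VSX
 * overlaps the FPU and VMX register files, it flushes those first and
 * only then calls the low-level routine.
 */
static void giveup_vsx(struct task_state *t)
{
	giveup_fpu(t);
	giveup_altivec(t);
	__giveup_vsx(t);
}

int main(void)
{
	struct task_state t = { .fpu_live = 1, .vmx_live = 1, .vsx_live = 1 };

	giveup_vsx(&t);	/* flushes all three register sets */
	printf("fpu=%d vmx=%d vsx=%d\n", t.fpu_live, t.vmx_live, t.vsx_live);
	return 0;
}

Call sites that have already flushed the FPU and VMX, such as __switch_to() below, keep calling __giveup_vsx() directly and so avoid redundant saves.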
-rw-r--r--	arch/powerpc/kernel/misc_64.S	|  8
-rw-r--r--	arch/powerpc/kernel/process.c	| 10
-rw-r--r--	arch/powerpc/kernel/signal_32.c	|  2
-rw-r--r--	arch/powerpc/kernel/signal_64.c	|  2
-rw-r--r--	include/asm-powerpc/system.h	|  1
5 files changed, 16 insertions, 7 deletions
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 31b9026cf1e3..4dd70cf7bb4e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -508,12 +508,12 @@ _GLOBAL(giveup_altivec)
 
 #ifdef CONFIG_VSX
 /*
- * giveup_vsx(tsk)
- * Disable VSX for the task given as the argument,
- * and save the vector registers in its thread_struct.
+ * __giveup_vsx(tsk)
+ * Disable VSX for the task given as the argument.
+ * Does NOT save vsx registers.
  * Enables the VSX for use in the kernel on return.
  */
-_GLOBAL(giveup_vsx)
+_GLOBAL(__giveup_vsx)
 	mfmsr	r5
 	oris	r5,r5,MSR_VSX@h
 	mtmsrd	r5			/* enable use of VSX now */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0a4eb0811590..219f3634115e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -159,6 +159,13 @@ void enable_kernel_vsx(void)
 EXPORT_SYMBOL(enable_kernel_vsx);
 #endif
 
+void giveup_vsx(struct task_struct *tsk)
+{
+	giveup_fpu(tsk);
+	giveup_altivec(tsk);
+	__giveup_vsx(tsk);
+}
+
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
@@ -290,7 +297,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
-		giveup_vsx(prev);
+		/* VMX and FPU registers are already saved here */
+		__giveup_vsx(prev);
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
 	/*
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 6f6810db0a74..3e80aa32b8b0 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -452,7 +452,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	 * contains valid data
 	 */
 	if (current->thread.used_vsr) {
-		flush_vsx_to_thread(current);
+		__giveup_vsx(current);
 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
 			return 1;
 		msr |= MSR_VSX;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 5f9d2ef2e24b..65ad925c3a8f 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -122,7 +122,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	 * VMX data.
 	 */
 	if (current->thread.used_vsr) {
-		flush_vsx_to_thread(current);
+		__giveup_vsx(current);
 		v_regs += ELF_NVRREG;
 		err |= copy_vsx_to_user(v_regs, current);
 		/* set MSR_VSX in the MSR value in the frame to
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 0c12c66733f6..e6e25e2364eb 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -139,6 +139,7 @@ extern void enable_kernel_altivec(void);
 extern void giveup_altivec(struct task_struct *);
 extern void load_up_altivec(struct task_struct *);
 extern int emulate_altivec(struct pt_regs *);
+extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);