aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ppc64/kernel/head.S
diff options
context:
space:
mode:
authorDavid Gibson <david@gibson.dropbear.id.au>2005-10-27 02:27:25 -0400
committerPaul Mackerras <paulus@samba.org>2005-10-27 06:48:50 -0400
commit25c8a78b1e00ac0cc640677eda78b462c2cd4c6e (patch)
treea0044f8b2b557799a8cb3346b590fcd3a8507ed7 /arch/ppc64/kernel/head.S
parentfda262b8978d0089758ef9444508434c74113a61 (diff)
[PATCH] powerpc: Fix handling of fpscr on 64-bit
The recent merge of fpu.S broke the handling of fpscr for ARCH=powerpc and CONFIG_PPC64=y. FP registers could be corrupted, leading to strange random application crashes. The confusion arises, because the thread_struct has (and requires) a 64-bit area to save the fpscr, because we use load/store double instructions to get it into/out of the FPU. However, only the low 32-bits are actually used, so we want to treat it as a 32-bit quantity when manipulating its bits to avoid extra load/stores on 32-bit. This patch replaces the current definition with a structure of two 32-bit quantities (pad and val), to clarify things as much as is possible. The 'val' field is used when manipulating bits, the structure itself is used when obtaining the address for loading/unloading the value from the FPU. While we're at it, consolidate the 4 (!) almost identical versions of cvt_fd() and cvt_df() (arch/ppc/kernel/misc.S, arch/ppc64/kernel/misc.S, arch/powerpc/kernel/misc_32.S, arch/powerpc/kernel/misc_64.S) into a single version in fpu.S. The new version takes a pointer to thread_struct and applies the correct offset itself, rather than a pointer to the fpscr field itself, again to avoid confusion as to which is the correct field to use. Finally, this patch makes ARCH=ppc64 also use the consolidated fpu.S code, which it previously did not. Built for G5 (ARCH=ppc64 and ARCH=powerpc), 32-bit powermac (ARCH=ppc and ARCH=powerpc) and Walnut (ARCH=ppc, CONFIG_MATH_EMULATION=y). Booted on G5 (ARCH=powerpc) and things which previously fell over no longer do. Signed-off-by: David Gibson <dwg@au1.ibm.com> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/ppc64/kernel/head.S')
-rw-r--r--arch/ppc64/kernel/head.S59
1 files changed, 2 insertions, 57 deletions
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index f58af9c246cb..929f9f42cf7a 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -81,7 +81,7 @@ _stext:
81_GLOBAL(__start) 81_GLOBAL(__start)
82 /* NOP this out unconditionally */ 82 /* NOP this out unconditionally */
83BEGIN_FTR_SECTION 83BEGIN_FTR_SECTION
84 b .__start_initialization_multiplatform 84 b .__start_initialization_multiplatform
85END_FTR_SECTION(0, 1) 85END_FTR_SECTION(0, 1)
86#endif /* CONFIG_PPC_MULTIPLATFORM */ 86#endif /* CONFIG_PPC_MULTIPLATFORM */
87 87
@@ -747,6 +747,7 @@ bad_stack:
747 * any task or sent any task a signal, you should use 747 * any task or sent any task a signal, you should use
748 * ret_from_except or ret_from_except_lite instead of this. 748 * ret_from_except or ret_from_except_lite instead of this.
749 */ 749 */
750 .globl fast_exception_return
750fast_exception_return: 751fast_exception_return:
751 ld r12,_MSR(r1) 752 ld r12,_MSR(r1)
752 ld r11,_NIP(r1) 753 ld r11,_NIP(r1)
@@ -858,62 +859,6 @@ fp_unavailable_common:
858 bl .kernel_fp_unavailable_exception 859 bl .kernel_fp_unavailable_exception
859 BUG_OPCODE 860 BUG_OPCODE
860 861
861/*
862 * load_up_fpu(unused, unused, tsk)
863 * Disable FP for the task which had the FPU previously,
864 * and save its floating-point registers in its thread_struct.
865 * Enables the FPU for use in the kernel on return.
866 * On SMP we know the fpu is free, since we give it up every
867 * switch (ie, no lazy save of the FP registers).
868 * On entry: r13 == 'current' && last_task_used_math != 'current'
869 */
870_STATIC(load_up_fpu)
871 mfmsr r5 /* grab the current MSR */
872 ori r5,r5,MSR_FP
873 mtmsrd r5 /* enable use of fpu now */
874 isync
875/*
876 * For SMP, we don't do lazy FPU switching because it just gets too
877 * horrendously complex, especially when a task switches from one CPU
878 * to another. Instead we call giveup_fpu in switch_to.
879 *
880 */
881#ifndef CONFIG_SMP
882 ld r3,last_task_used_math@got(r2)
883 ld r4,0(r3)
884 cmpdi 0,r4,0
885 beq 1f
886 /* Save FP state to last_task_used_math's THREAD struct */
887 addi r4,r4,THREAD
888 SAVE_32FPRS(0, r4)
889 mffs fr0
890 stfd fr0,THREAD_FPSCR(r4)
891 /* Disable FP for last_task_used_math */
892 ld r5,PT_REGS(r4)
893 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
894 li r6,MSR_FP|MSR_FE0|MSR_FE1
895 andc r4,r4,r6
896 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8971:
898#endif /* CONFIG_SMP */
899 /* enable use of FP after return */
900 ld r4,PACACURRENT(r13)
901 addi r5,r4,THREAD /* Get THREAD */
902 ld r4,THREAD_FPEXC_MODE(r5)
903 ori r12,r12,MSR_FP
904 or r12,r12,r4
905 std r12,_MSR(r1)
906 lfd fr0,THREAD_FPSCR(r5)
907 mtfsf 0xff,fr0
908 REST_32FPRS(0, r5)
909#ifndef CONFIG_SMP
910 /* Update last_task_used_math to 'current' */
911 subi r4,r5,THREAD /* Back to 'current' */
912 std r4,0(r3)
913#endif /* CONFIG_SMP */
914 /* restore registers and return */
915 b fast_exception_return
916
917 .align 7 862 .align 7
918 .globl altivec_unavailable_common 863 .globl altivec_unavailable_common
919altivec_unavailable_common: 864altivec_unavailable_common: