author	Paul Mackerras <paulus@samba.org>	2005-10-05 20:59:19 -0400
committer	Paul Mackerras <paulus@samba.org>	2005-10-05 20:59:19 -0400
commit	b85a046af3a260e079505e8023ccd10e01cf4f2b (patch)
tree	2f5f1af4db85fca6fc88902840463b107721cb14 /arch/powerpc/kernel/head_64.S
parent	6ce52e6438fd2921891648ceab700d9b867e5ed2 (diff)
powerpc: Define 32/64 bit asm macros and use them in fpu.S

These macros help in writing assembly code that works for both ppc32
and ppc64. With this we now have a common fpu.S. This takes out
load_up_fpu from head_64.S.

Signed-off-by: Paul Mackerras <paulus@samba.org>
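As background, a minimal sketch of the kind of width-selecting macros the
patch adds to ppc_asm.h. The names LDL/STL/CMPI and the CONFIG_PPC64 guard
are shown here as illustrative assumptions; the commit's ppc_asm.h hunk
(not part of this file-limited diff) is authoritative:

	/* Illustrative sketch, not the commit's exact text: select the
	 * doubleword instruction forms on ppc64, word forms on ppc32 */
	#ifdef CONFIG_PPC64
	#define LDL	ld	/* load doubleword */
	#define STL	std	/* store doubleword */
	#define CMPI	cmpdi	/* compare doubleword immediate */
	#else
	#define LDL	lwz	/* load word and zero */
	#define STL	stw	/* store word */
	#define CMPI	cmpwi	/* compare word immediate */
	#endif

With macros like these, a single fpu.S source assembles correctly at both
widths: LDL r4,0(r3) becomes ld on ppc64 and lwz on ppc32.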
Diffstat (limited to 'arch/powerpc/kernel/head_64.S')
-rw-r--r--	arch/powerpc/kernel/head_64.S	| 58
1 file changed, 1 insertion(+), 57 deletions(-)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index db0cd3587627..a36ee6ecbaea 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,7 +80,7 @@ _stext:
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
 	b	.__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
@@ -857,62 +857,6 @@ fp_unavailable_common:
 	bl	.kernel_fp_unavailable_exception
 	BUG_OPCODE
 
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr	r5			/* grab the current MSR */
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_math@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	ld	r4,THREAD_FPEXC_MODE(r5)
-	ori	r12,r12,MSR_FP
-	or	r12,r12,r4
-	std	r12,_MSR(r1)
-	lfd	fr0,THREAD_FPSCR(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b	fast_exception_return
-
 	.align	7
 	.globl altivec_unavailable_common
 altivec_unavailable_common:
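The deleted routine is not lost: this commit moves load_up_fpu into the
common arch/powerpc/kernel/fpu.S, which this file-limited diff does not
show. As a hedged sketch of how the width-dependent instructions above
translate there, reusing the illustrative LDL/CMPI macros from the note
under the commit message:

	/* Sketch: the !SMP lazy-save check rewritten with size-neutral
	 * macros; illustrative only, see the fpu.S side of this commit
	 * for the real code */
	#ifndef CONFIG_SMP
		LDL	r4,0(r3)	/* r4 = last_task_used_math */
		CMPI	0,r4,0		/* did anyone own the FPU? */
		beq	1f		/* no: skip the save */
		addi	r4,r4,THREAD	/* point at its thread_struct */
		SAVE_32FPRS(0, r4)	/* save all 32 FP registers */
	1:
	#endif /* CONFIG_SMP */

The ld/cmpdi pair in the deleted head_64.S code is the kind of
64-bit-only sequence that collapses into such macros once ppc32 and
ppc64 share one source file.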