author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-06-02 17:17:37 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-06-09 02:46:25 -0400
commit    e821ea70f3b4873b50056a1e0f74befed1014c09 (patch)
tree      5c7808e9ab65af2577a05a79726fb211f673feb8 /arch/powerpc/kernel/head_64.S
parent    d3f6204a7d65030ba92bf43a278b3f3054353e0b (diff)
powerpc: Move VMX and VSX asm code to vector.S
Currently, load_up_altivec and give_up_altivec are duplicated in 32-bit
and 64-bit. This creates a common implementation that is moved away from
head_32.S, head_64.S and misc_64.S and into vector.S, using the same
macros we already use for our common implementation of load_up_fpu.

I also moved the VSX code over to vector.S, though in that case I didn't
make it build on 32-bit (yet).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
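For context on how a single assembly file can serve both word sizes: the
kernel's arch/powerpc/include/asm/asm-compat.h provides helpers such as
PPC_LL and PPC_STL, which expand to ld/std on 64-bit and lwz/stw on
32-bit. The sketch below is purely illustrative (the symbol
copy_ptr_field is hypothetical, not code from vector.S):

#include <asm/asm-compat.h>
#include <asm/ppc_asm.h>

/* Hypothetical helper, for illustration only: copy one pointer-sized
 * field from *r3 to *r4.  PPC_LL/PPC_STL pick the right load/store for
 * the word size, so the same source assembles on ppc32 and ppc64. */
_GLOBAL(copy_ptr_field)
	PPC_LL	r5,0(r3)		/* ld on ppc64, lwz on ppc32 */
	PPC_STL	r5,0(r4)		/* std on ppc64, stw on ppc32 */
	blr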
Diffstat (limited to 'arch/powerpc/kernel/head_64.S')
-rw-r--r--  arch/powerpc/kernel/head_64.S | 118 ------------------------------------------
1 file changed, 0 insertions(+), 118 deletions(-)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 50ef505b8fb6..382495fa90b0 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -844,124 +844,6 @@ unrecov_fer:
 	bl	.unrecoverable_exception
 	b	1b
 
-#ifdef CONFIG_ALTIVEC
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
-	mfmsr	r5			/* grab the current MSR */
-	oris	r5,r5,MSR_VEC@h
-	mtmsrd	r5			/* enable use of VMX now */
-	isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_altivec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_altivec@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save VMX state to last_task_used_altivec's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32VRS(0,r5,r4)
-	mfvscr	vr0
-	li	r10,THREAD_VSCR
-	stvx	vr0,r10,r4
-	/* Disable VMX for last_task_used_altivec */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r6,MSR_VEC@h
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* Hack: if we get an altivec unavailable trap with VRSAVE
-	 * set to all zeros, we assume this is a broken application
-	 * that fails to set it properly, and thus we switch it to
-	 * all 1's
-	 */
-	mfspr	r4,SPRN_VRSAVE
-	cmpdi	0,r4,0
-	bne+	1f
-	li	r4,-1
-	mtspr	SPRN_VRSAVE,r4
-1:
-	/* enable use of VMX after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	oris	r12,r12,MSR_VEC@h
-	std	r12,_MSR(r1)
-	li	r4,1
-	li	r10,THREAD_VSCR
-	stw	r4,THREAD_USED_VR(r5)
-	lvx	vr0,r10,r5
-	mtvscr	vr0
-	REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_altivec to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	blr
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * load_up_vsx(unused, unused, tsk)
- * Disable VSX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Reuse the fp and vsx saves, but first check to see if they have
- * been saved already.
- * On entry: r13 == 'current' && last_task_used_vsx != 'current'
- */
-_STATIC(load_up_vsx)
-/* Load FP and VSX registers if they haven't been done yet */
-	andi.	r5,r12,MSR_FP
-	beql+	load_up_fpu		/* skip if already loaded */
-	andis.	r5,r12,MSR_VEC@h
-	beql+	load_up_altivec		/* skip if already loaded */
-
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_vsx@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Disable VSX for last_task_used_vsx */
-	addi	r4,r4,THREAD
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r6,MSR_VSX@h
-	andc	r6,r4,r6
-	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	ld	r4,PACACURRENT(r13)
-	addi	r4,r4,THREAD		/* Get THREAD */
-	li	r6,1
-	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
-	/* enable use of VSX after return */
-	oris	r12,r12,MSR_VSX@h
-	std	r12,_MSR(r1)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_vsx to 'current' */
-	ld	r4,PACACURRENT(r13)
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	b	fast_exception_return
-#endif /* CONFIG_VSX */
 
 /*
  * Hash table stuff
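A note on why the deleted enable sequence (mfmsr; oris; mtmsrd; isync)
could not be shared as-is: mtmsrd exists only on 64-bit. A common
implementation can stay word-size-neutral with the MTMSRD() helper from
asm/ppc_asm.h, which expands to mtmsrd on ppc64 and mtmsr on ppc32. A
minimal sketch of the idiom (not the literal vector.S contents):

	mfmsr	r5			/* read the current MSR */
	oris	r5,r5,MSR_VEC@h		/* set the VMX-available bit */
	MTMSRD(r5)			/* mtmsrd on ppc64, mtmsr on ppc32 */
	isync				/* synchronize before touching VMX regs */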