author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-06-02 17:17:37 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-06-09 02:46:25 -0400
commit		e821ea70f3b4873b50056a1e0f74befed1014c09 (patch)
tree		5c7808e9ab65af2577a05a79726fb211f673feb8
parent		d3f6204a7d65030ba92bf43a278b3f3054353e0b (diff)
powerpc: Move VMX and VSX asm code to vector.S
Currently, load_up_altivec and giveup_altivec are duplicated in 32-bit
and 64-bit. This creates a common implementation that is moved away
from head_32.S, head_64.S and misc_64.S and into vector.S, using the
same macros we already use for our common implementation of
load_up_fpu.

I also moved the VSX code over to vector.S, though in that case I
didn't make it build on 32-bit (yet).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--	arch/powerpc/Makefile		|   1
-rw-r--r--	arch/powerpc/kernel/Makefile	|   3
-rw-r--r--	arch/powerpc/kernel/head_32.S	|  95
-rw-r--r--	arch/powerpc/kernel/head_64.S	| 118
-rw-r--r--	arch/powerpc/kernel/misc_64.S	|  92
-rw-r--r--	arch/powerpc/kernel/vector.S	| 210
6 files changed, 213 insertions(+), 306 deletions(-)
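
Note: the 32/64-bit sharing described above leans on the kernel's existing
register-width helper macros (PPC_LL, PPC_STL, PPC_LCMPI and friends), the
same ones the common fpu.S already uses. A minimal sketch of what such
helpers boil down to, assuming the usual asm-compat.h-style definitions
(simplified here, not an excerpt of this patch):

	/* Simplified sketch; the real kernel definitions differ in detail. */
	#ifdef __powerpc64__
	#define PPC_LL		ld	/* load a register-sized (64-bit) value */
	#define PPC_STL		std	/* store a register-sized value */
	#define PPC_LCMPI	cmpdi	/* compare a register-sized value */
	#else
	#define PPC_LL		lwz	/* 32-bit load */
	#define PPC_STL		stw	/* 32-bit store */
	#define PPC_LCMPI	cmpwi	/* 32-bit compare */
	#endif

With those, the single load_up_altivec/giveup_altivec sequence in vector.S
assembles to the right instructions on either word size, which is what lets
the duplicated copies in head_32.S, head_64.S and misc_64.S be deleted.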
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 551fc58c05cf..bc35f4e2b81c 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -142,6 +142,7 @@ head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
 
 head-$(CONFIG_PPC64)		+= arch/powerpc/kernel/entry_64.o
 head-$(CONFIG_PPC_FPU)		+= arch/powerpc/kernel/fpu.o
+head-$(CONFIG_ALTIVEC)		+= arch/powerpc/kernel/vector.o
 
 core-y				+= arch/powerpc/kernel/ \
 				   arch/powerpc/mm/ \
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 71901fbda4a5..cbc359f69e00 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
 				   firmware.o nvram_64.o
 obj64-$(CONFIG_RELOCATABLE)	+= reloc_64.o
 obj-$(CONFIG_PPC64)		+= vdso64/
-obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
+obj-$(CONFIG_ALTIVEC)		+= vecemu.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
 obj-$(CONFIG_PPC_OF)		+= of_device.o of_platform.o prom_parse.o
 obj-$(CONFIG_PPC_CLOCK)		+= clock.o
@@ -108,6 +108,7 @@ obj-y += ppc_save_regs.o
 endif
 
 extra-$(CONFIG_PPC_FPU)		+= fpu.o
+extra-$(CONFIG_ALTIVEC)		+= vector.o
 extra-$(CONFIG_PPC64)		+= entry_64.o
 
 extra-y				+= systbl_chk.i
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index c01467f952d3..6437f905c566 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -743,101 +743,6 @@ PerformanceMonitor:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	EXC_XFER_STD(0xf00, performance_monitor_exception)
 
-#ifdef CONFIG_ALTIVEC
-/* Note that the AltiVec support is closely modeled after the FP
- * support. Changes to one are likely to be applicable to the
- * other! */
-load_up_altivec:
-/*
- * Disable AltiVec for the task which had AltiVec previously,
- * and save its AltiVec registers in its thread_struct.
- * Enables AltiVec for use in the kernel on return.
- * On SMP we know the AltiVec units are free, since we give it up every
- * switch. -- Kumar
- */
-	mfmsr	r5
-	oris	r5,r5,MSR_VEC@h
-	MTMSRD(r5)			/* enable use of AltiVec now */
-	isync
-/*
- * For SMP, we don't do lazy AltiVec switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altivec in switch_to.
- */
-#ifndef CONFIG_SMP
-	tophys(r6,0)
-	addis	r3,r6,last_task_used_altivec@ha
-	lwz	r4,last_task_used_altivec@l(r3)
-	cmpwi	0,r4,0
-	beq	1f
-	add	r4,r4,r6
-	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
-	SAVE_32VRS(0,r10,r4)
-	mfvscr	vr0
-	li	r10,THREAD_VSCR
-	stvx	vr0,r10,r4
-	lwz	r5,PT_REGS(r4)
-	add	r5,r5,r6
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r10,MSR_VEC@h
-	andc	r4,r4,r10	/* disable altivec for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of AltiVec after return */
-	oris	r9,r9,MSR_VEC@h
-	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
-	li	r4,1
-	li	r10,THREAD_VSCR
-	stw	r4,THREAD_USED_VR(r5)
-	lvx	vr0,r10,r5
-	mtvscr	vr0
-	REST_32VRS(0,r10,r5)
-#ifndef CONFIG_SMP
-	subi	r4,r5,THREAD
-	sub	r4,r4,r6
-	stw	r4,last_task_used_altivec@l(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	/* we haven't used ctr or xer or lr */
-	b	fast_exception_return
-
-/*
- * giveup_altivec(tsk)
- * Disable AltiVec for the task given as the argument,
- * and save the AltiVec registers in its thread_struct.
- * Enables AltiVec for use in the kernel on return.
- */
-
-	.globl	giveup_altivec
-giveup_altivec:
-	mfmsr	r5
-	oris	r5,r5,MSR_VEC@h
-	SYNC
-	MTMSRD(r5)			/* enable use of AltiVec now */
-	isync
-	cmpwi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	lwz	r5,PT_REGS(r3)
-	cmpwi	0,r5,0
-	SAVE_32VRS(0, r4, r3)
-	mfvscr	vr0
-	li	r4,THREAD_VSCR
-	stvx	vr0,r4,r3
-	beq	1f
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r3,MSR_VEC@h
-	andc	r4,r4,r3		/* disable AltiVec for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	lis	r4,last_task_used_altivec@ha
-	stw	r5,last_task_used_altivec@l(r4)
-#endif /* CONFIG_SMP */
-	blr
-#endif /* CONFIG_ALTIVEC */
 
 /*
  * This code is jumped to from the startup code to copy
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 50ef505b8fb6..382495fa90b0 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -844,124 +844,6 @@ unrecov_fer:
 	bl	.unrecoverable_exception
 	b	1b
 
-#ifdef CONFIG_ALTIVEC
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
-	mfmsr	r5			/* grab the current MSR */
-	oris	r5,r5,MSR_VEC@h
-	mtmsrd	r5			/* enable use of VMX now */
-	isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_altivec@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save VMX state to last_task_used_altivec's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32VRS(0,r5,r4)
-	mfvscr	vr0
-	li	r10,THREAD_VSCR
-	stvx	vr0,r10,r4
-	/* Disable VMX for last_task_used_altivec */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r6,MSR_VEC@h
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* Hack: if we get an altivec unavailable trap with VRSAVE
-	 * set to all zeros, we assume this is a broken application
-	 * that fails to set it properly, and thus we switch it to
-	 * all 1's
-	 */
-	mfspr	r4,SPRN_VRSAVE
-	cmpdi	0,r4,0
-	bne+	1f
-	li	r4,-1
-	mtspr	SPRN_VRSAVE,r4
-1:
-	/* enable use of VMX after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	oris	r12,r12,MSR_VEC@h
-	std	r12,_MSR(r1)
-	li	r4,1
-	li	r10,THREAD_VSCR
-	stw	r4,THREAD_USED_VR(r5)
-	lvx	vr0,r10,r5
-	mtvscr	vr0
-	REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	blr
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * load_up_vsx(unused, unused, tsk)
- * Disable VSX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Reuse the fp and vsx saves, but first check to see if they have
- * been saved already.
- * On entry: r13 == 'current' && last_task_used_vsx != 'current'
- */
-_STATIC(load_up_vsx)
-/* Load FP and VSX registers if they haven't been done yet */
-	andi.	r5,r12,MSR_FP
-	beql+	load_up_fpu		/* skip if already loaded */
-	andis.	r5,r12,MSR_VEC@h
-	beql+	load_up_altivec		/* skip if already loaded */
-
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_vsx@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Disable VSX for last_task_used_vsx */
-	addi	r4,r4,THREAD
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r6,MSR_VSX@h
-	andc	r6,r4,r6
-	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	ld	r4,PACACURRENT(r13)
-	addi	r4,r4,THREAD		/* Get THREAD */
-	li	r6,1
-	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
-	/* enable use of VSX after return */
-	oris	r12,r12,MSR_VSX@h
-	std	r12,_MSR(r1)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	ld	r4,PACACURRENT(r13)
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	b	fast_exception_return
-#endif /* CONFIG_VSX */
 
 /*
  * Hash table stuff
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index b9530b2395a2..a5cf9c1356a6 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -457,98 +457,6 @@ _GLOBAL(disable_kernel_fp)
 	isync
 	blr
 
-#ifdef CONFIG_ALTIVEC
-
-#if 0 /* this has no callers for now */
-/*
- * disable_kernel_altivec()
- * Disable the VMX.
- */
-_GLOBAL(disable_kernel_altivec)
-	mfmsr	r3
-	rldicl	r0,r3,(63-MSR_VEC_LG),1
-	rldicl	r3,r0,(MSR_VEC_LG+1),0
-	mtmsrd	r3			/* disable use of VMX now */
-	isync
-	blr
-#endif /* 0 */
-
-/*
- * giveup_altivec(tsk)
- * Disable VMX for the task given as the argument,
- * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- */
-_GLOBAL(giveup_altivec)
-	mfmsr	r5
-	oris	r5,r5,MSR_VEC@h
-	mtmsrd	r5			/* enable use of VMX now */
-	isync
-	cmpdi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	ld	r5,PT_REGS(r3)
-	cmpdi	0,r5,0
-	SAVE_32VRS(0,r4,r3)
-	mfvscr	vr0
-	li	r4,THREAD_VSCR
-	stvx	vr0,r4,r3
-	beq	1f
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	lis	r3,(MSR_VEC|MSR_VSX)@h
-FTR_SECTION_ELSE
-	lis	r3,MSR_VEC@h
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#else
-	lis	r3,MSR_VEC@h
-#endif
-	andc	r4,r4,r3		/* disable FP for previous task */
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	ld	r4,last_task_used_altivec@got(r2)
-	std	r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * __giveup_vsx(tsk)
- * Disable VSX for the task given as the argument.
- * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
- */
-_GLOBAL(__giveup_vsx)
-	mfmsr	r5
-	oris	r5,r5,MSR_VSX@h
-	mtmsrd	r5			/* enable use of VSX now */
-	isync
-
-	cmpdi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	ld	r5,PT_REGS(r3)
-	cmpdi	0,r5,0
-	beq	1f
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r3,MSR_VSX@h
-	andc	r4,r4,r3		/* disable VSX for previous task */
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	ld	r4,last_task_used_vsx@got(r2)
-	std	r5,0(r4)
-#endif /* CONFIG_SMP */
-	blr
-
-#endif /* CONFIG_VSX */
-
 /* kexec_wait(phys_cpu)
  *
  * wait for the flag to change, indicating this kernel is going away but
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 49ac3d6e1399..ef36cbbc5882 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,5 +1,215 @@
+#include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/reg.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ */
+_GLOBAL(load_up_altivec)
+	mfmsr	r5			/* grab the current MSR */
+	oris	r5,r5,MSR_VEC@h
+	MTMSRD(r5)			/* enable use of AltiVec now */
+	isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_altvec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
+	toreal(r3)
+	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
+	PPC_LCMPI	0,r4,0
+	beq	1f
+
+	/* Save VMX state to last_task_used_altivec's THREAD struct */
+	toreal(r4)
+	addi	r4,r4,THREAD
+	SAVE_32VRS(0,r5,r4)
+	mfvscr	vr0
+	li	r10,THREAD_VSCR
+	stvx	vr0,r10,r4
+	/* Disable VMX for last_task_used_altivec */
+	PPC_LL	r5,PT_REGS(r4)
+	toreal(r5)
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r10,MSR_VEC@h
+	andc	r4,r4,r10
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+
+	/* Hack: if we get an altivec unavailable trap with VRSAVE
+	 * set to all zeros, we assume this is a broken application
+	 * that fails to set it properly, and thus we switch it to
+	 * all 1's
+	 */
+	mfspr	r4,SPRN_VRSAVE
+	cmpdi	0,r4,0
+	bne+	1f
+	li	r4,-1
+	mtspr	SPRN_VRSAVE,r4
+1:
+	/* enable use of VMX after return */
+#ifdef CONFIG_PPC32
+	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+	oris	r9,r9,MSR_VEC@h
+#else
+	ld	r4,PACACURRENT(r13)
+	addi	r5,r4,THREAD		/* Get THREAD */
+	oris	r12,r12,MSR_VEC@h
+	std	r12,_MSR(r1)
+#endif
+	li	r4,1
+	li	r10,THREAD_VSCR
+	stw	r4,THREAD_USED_VR(r5)
+	lvx	vr0,r10,r5
+	mtvscr	vr0
+	REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	subi	r4,r5,THREAD		/* Back to 'current' */
+	fromreal(r4)
+	PPC_STL	r4,ADDROFF(last_task_used_math)(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	blr
+
+/*
+ * giveup_altivec(tsk)
+ * Disable VMX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ */
+_GLOBAL(giveup_altivec)
+	mfmsr	r5
+	oris	r5,r5,MSR_VEC@h
+	SYNC
+	MTMSRD(r5)			/* enable use of VMX now */
+	isync
+	PPC_LCMPI	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	PPC_LL	r5,PT_REGS(r3)
+	PPC_LCMPI	0,r5,0
+	SAVE_32VRS(0,r4,r3)
+	mfvscr	vr0
+	li	r4,THREAD_VSCR
+	stvx	vr0,r4,r3
+	beq	1f
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	lis	r3,(MSR_VEC|MSR_VSX)@h
+FTR_SECTION_ELSE
+	lis	r3,MSR_VEC@h
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#else
+	lis	r3,MSR_VEC@h
+#endif
+	andc	r4,r4,r3		/* disable FP for previous task */
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
+	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
+#endif /* CONFIG_SMP */
+	blr
+
+#ifdef CONFIG_VSX
+
+#ifdef CONFIG_PPC32
+#error This asm code isn't ready for 32-bit kernels
+#endif
+
+/*
+ * load_up_vsx(unused, unused, tsk)
+ * Disable VSX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Reuse the fp and vsx saves, but first check to see if they have
+ * been saved already.
+ */
+_GLOBAL(load_up_vsx)
+/* Load FP and VSX registers if they haven't been done yet */
+	andi.	r5,r12,MSR_FP
+	beql+	load_up_fpu		/* skip if already loaded */
+	andis.	r5,r12,MSR_VEC@h
+	beql+	load_up_altivec		/* skip if already loaded */
+
+#ifndef CONFIG_SMP
+	ld	r3,last_task_used_vsx@got(r2)
+	ld	r4,0(r3)
+	cmpdi	0,r4,0
+	beq	1f
+	/* Disable VSX for last_task_used_vsx */
+	addi	r4,r4,THREAD
+	ld	r5,PT_REGS(r4)
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r6,MSR_VSX@h
+	andc	r6,r4,r6
+	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	ld	r4,PACACURRENT(r13)
+	addi	r4,r4,THREAD		/* Get THREAD */
+	li	r6,1
+	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
+	/* enable use of VSX after return */
+	oris	r12,r12,MSR_VSX@h
+	std	r12,_MSR(r1)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	ld	r4,PACACURRENT(r13)
+	std	r4,0(r3)
+#endif /* CONFIG_SMP */
+	b	fast_exception_return
+
+/*
+ * __giveup_vsx(tsk)
+ * Disable VSX for the task given as the argument.
+ * Does NOT save vsx registers.
+ * Enables the VSX for use in the kernel on return.
+ */
+_GLOBAL(__giveup_vsx)
+	mfmsr	r5
+	oris	r5,r5,MSR_VSX@h
+	mtmsrd	r5			/* enable use of VSX now */
+	isync
+
+	cmpdi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	ld	r5,PT_REGS(r3)
+	cmpdi	0,r5,0
+	beq	1f
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r3,MSR_VSX@h
+	andc	r4,r4,r3		/* disable VSX for previous task */
+	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	ld	r4,last_task_used_vsx@got(r2)
+	std	r5,0(r4)
+#endif /* CONFIG_SMP */
+	blr
+
+#endif /* CONFIG_VSX */
+
 
 /*
  * The routines below are in assembler so we can closely control the