author     Anton Blanchard <anton@samba.org>      2015-10-28 20:44:01 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>  2015-11-30 21:52:25 -0500
commit     98da581e0846f6d932a4bc46a55458140e20478a
tree       492f2ebbd22ed1000f2d61aa8ad9a7b3ef5f1a1b
parent     b51b1153d0e78a70767441273331d2de066bb929
powerpc: Move part of giveup_fpu,altivec,spe into c
Move the MSR modification into new C functions. Removing it from
the low-level assembly functions will allow us to avoid costly MSR
writes by batching them up.

Move the check_if_tm_restore_required() check into these new functions.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
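
[Editor's note] The new C helpers below rely on mtmsr_isync(), which is
introduced by the parent commit and is not shown in this diff. A minimal
sketch of its assumed shape (an assumption, not the verbatim kernel
definition, which also has to handle 64-bit mtmsrd and CPUs that can
skip the isync):

/* Sketch only (assumed helper): write the MSR, then context-synchronise. */
static inline void mtmsr_isync(unsigned long val)
{
        asm volatile("mtmsr %0; isync" : : "r" (val) : "memory");
}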
 arch/powerpc/include/asm/switch_to.h | 21
 arch/powerpc/kernel/fpu.S            | 16
 arch/powerpc/kernel/head_fsl_booke.S |  8
 arch/powerpc/kernel/ppc_ksyms.c      |  6
 arch/powerpc/kernel/process.c        | 52
 arch/powerpc/kernel/vector.S         | 10
6 files changed, 65 insertions(+), 48 deletions(-)
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 042aaf05a787..c2678b93bcba 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -23,28 +23,27 @@ extern int emulate_altivec(struct pt_regs *);
 extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
-extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifdef CONFIG_PPC_FPU
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
+extern void __giveup_fpu(struct task_struct *);
 #else
 static inline void flush_fp_to_thread(struct task_struct *t) { }
 static inline void giveup_fpu(struct task_struct *t) { }
+static inline void __giveup_fpu(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
+extern void __giveup_altivec(struct task_struct *);
 #else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-static inline void giveup_altivec(struct task_struct *t)
-{
-}
+static inline void flush_altivec_to_thread(struct task_struct *t) { }
+static inline void giveup_altivec(struct task_struct *t) { }
+static inline void __giveup_altivec(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_VSX
@@ -57,10 +56,12 @@ static inline void flush_vsx_to_thread(struct task_struct *t)
 
 #ifdef CONFIG_SPE
 extern void flush_spe_to_thread(struct task_struct *);
+extern void giveup_spe(struct task_struct *);
+extern void __giveup_spe(struct task_struct *);
 #else
-static inline void flush_spe_to_thread(struct task_struct *t)
-{
-}
+static inline void flush_spe_to_thread(struct task_struct *t) { }
+static inline void giveup_spe(struct task_struct *t) { }
+static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
 static inline void clear_task_ebb(struct task_struct *t)
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 71bdce284ad9..431ab571ed1b 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -155,24 +155,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        blr
 
 /*
- * giveup_fpu(tsk)
+ * __giveup_fpu(tsk)
  * Disable FP for the task given as the argument,
  * and save the floating-point registers in its thread_struct.
  * Enables the FPU for use in the kernel on return.
  */
-_GLOBAL(giveup_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-       oris    r5,r5,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-       SYNC_601
-       ISYNC_601
-       MTMSRD(r5)                      /* enable use of fpu now */
-       SYNC_601
-       isync
+_GLOBAL(__giveup_fpu)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r6,THREAD_FPSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index d6980bbae954..f705171b924b 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -984,14 +984,10 @@ _GLOBAL(__setup_ehv_ivors)
 
 #ifdef CONFIG_SPE
 /*
- * extern void giveup_spe(struct task_struct *prev)
+ * extern void __giveup_spe(struct task_struct *prev)
  *
  */
-_GLOBAL(giveup_spe)
-       mfmsr   r5
-       oris    r5,r5,MSR_SPE@h
-       mtmsr   r5                      /* enable use of SPE now */
-       isync
+_GLOBAL(__giveup_spe)
        addi    r3,r3,THREAD            /* want THREAD of task */
        lwz     r5,PT_REGS(r3)
        cmpi    0,r5,0
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 202963ee013a..41e1607e800c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -19,13 +19,11 @@ EXPORT_SYMBOL(_mcount);
 #endif
 
 #ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(giveup_fpu);
 EXPORT_SYMBOL(load_fp_state);
 EXPORT_SYMBOL(store_fp_state);
 #endif
 
 #ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(load_vr_state);
 EXPORT_SYMBOL(store_vr_state);
 #endif
@@ -34,10 +32,6 @@ EXPORT_SYMBOL(store_vr_state);
 EXPORT_SYMBOL_GPL(__giveup_vsx);
 #endif
 
-#ifdef CONFIG_SPE
-EXPORT_SYMBOL(giveup_spe);
-#endif
-
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 5bf8ec2597d4..6bcf82bed610 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -88,6 +88,25 @@ static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 #ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+       u64 oldmsr = mfmsr();
+       u64 newmsr;
+
+       check_if_tm_restore_required(tsk);
+
+       newmsr = oldmsr | MSR_FP;
+#ifdef CONFIG_VSX
+       if (cpu_has_feature(CPU_FTR_VSX))
+               newmsr |= MSR_VSX;
+#endif
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+
+       __giveup_fpu(tsk);
+}
+EXPORT_SYMBOL(giveup_fpu);
+
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
@@ -113,7 +132,6 @@ void flush_fp_to_thread(struct task_struct *tsk)
                 * to still have its FP state in the CPU registers.
                 */
                BUG_ON(tsk != current);
-               check_if_tm_restore_required(tsk);
                giveup_fpu(tsk);
        }
        preempt_enable();
@@ -127,7 +145,6 @@ void enable_kernel_fp(void)
        WARN_ON(preemptible());
 
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
-               check_if_tm_restore_required(current);
                giveup_fpu(current);
        } else {
                u64 oldmsr = mfmsr();
@@ -139,12 +156,26 @@ void enable_kernel_fp(void)
 EXPORT_SYMBOL(enable_kernel_fp);
 
 #ifdef CONFIG_ALTIVEC
+void giveup_altivec(struct task_struct *tsk)
+{
+       u64 oldmsr = mfmsr();
+       u64 newmsr;
+
+       check_if_tm_restore_required(tsk);
+
+       newmsr = oldmsr | MSR_VEC;
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+
+       __giveup_altivec(tsk);
+}
+EXPORT_SYMBOL(giveup_altivec);
+
 void enable_kernel_altivec(void)
 {
        WARN_ON(preemptible());
 
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
-               check_if_tm_restore_required(current);
                giveup_altivec(current);
        } else {
                u64 oldmsr = mfmsr();
@@ -165,7 +196,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
        preempt_disable();
        if (tsk->thread.regs->msr & MSR_VEC) {
                BUG_ON(tsk != current);
-               check_if_tm_restore_required(tsk);
                giveup_altivec(tsk);
        }
        preempt_enable();
@@ -214,6 +244,20 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
+void giveup_spe(struct task_struct *tsk)
+{
+       u64 oldmsr = mfmsr();
+       u64 newmsr;
+
+       check_if_tm_restore_required(tsk);
+
+       newmsr = oldmsr | MSR_SPE;
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+
+       __giveup_spe(tsk);
+}
+EXPORT_SYMBOL(giveup_spe);
 
 void enable_kernel_spe(void)
 {
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index b31528c30253..6e925b40a484 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -112,17 +112,11 @@ _GLOBAL(load_up_altivec)
        blr
 
 /*
- * giveup_altivec(tsk)
+ * __giveup_altivec(tsk)
  * Disable VMX for the task given as the argument,
  * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
  */
-_GLOBAL(giveup_altivec)
-       mfmsr   r5
-       oris    r5,r5,MSR_VEC@h
-       SYNC
-       MTMSRD(r5)                      /* enable use of VMX now */
-       isync
+_GLOBAL(__giveup_altivec)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r7,THREAD_VRSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
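
[Editor's note] On the batching the commit message refers to: with the
MSR update now in C, a later patch can combine the required MSR bits and
issue a single write, instead of one per giveup_*() call. A hypothetical
sketch of such a caller (giveup_all_sketch() is illustration only and is
not part of this commit):

/* Hypothetical illustration of batched MSR writes; not in this commit. */
void giveup_all_sketch(struct task_struct *tsk)
{
        u64 oldmsr = mfmsr();
        u64 newmsr = oldmsr | MSR_FP | MSR_VEC; /* one write covers both */

        if (oldmsr != newmsr)
                mtmsr_isync(newmsr);

        __giveup_fpu(tsk);      /* MSR already set up for both facilities */
        __giveup_altivec(tsk);
}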