author     Andreas Schwab <schwab@linux-m68k.org>               2010-08-21 07:43:20 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>    2010-09-02 00:07:32 -0400
commit     05d77ac90c0d260ae18decd70507dc4f5b71a2cb (patch)
tree       84cca10e89f60d8530d9951e8e20931329154ef2 /arch/powerpc
parent     872e439a45ed4a4bd499bc55cb0dffa74027f749 (diff)
powerpc: Remove fpscr use from [kvm_]cvt_{fd,df}
Neither lfs nor stfs touch the fpscr, so remove the restore/save of it
around them.
Signed-off-by: Andreas Schwab <schwab@linux-m68k.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
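
The helpers touched here are plain widen/narrow conversions: cvt_fd/kvm_cvt_fd convert a single-precision value to double (lfs then stfd), and cvt_df/kvm_cvt_df convert a double to single (lfd then stfs). For orientation only, a minimal portable C sketch of that semantics follows; it is an illustration, not the kernel code, which remains PowerPC assembly in the fpu.S files below.

    /* Hedged illustration of what cvt_fd()/cvt_df() compute after this patch.
     * The real helpers are PowerPC assembly (lfs/stfd and lfd/stfs); this
     * sketch only mirrors the prototypes, which no longer take an fpscr pointer. */
    #include <stdio.h>

    static void cvt_fd(float *from, double *to) { *to = (double)*from; } /* widen  */
    static void cvt_df(double *from, float *to) { *to = (float)*from;  } /* narrow */

    int main(void)
    {
            float f = 1.5f, back;
            double d;

            cvt_fd(&f, &d);    /* 1.5f -> 1.5  */
            cvt_df(&d, &back); /* 1.5  -> 1.5f */
            printf("%f %f\n", d, back);
            return 0;
    }

Because lfs and stfs do not touch the FPSCR (per the commit message), the save/restore around them carried no state worth preserving, so the fpscr argument is dropped at every call site in the diff below.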
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_fpu.h         |  4
-rw-r--r--  arch/powerpc/include/asm/system.h          |  4
-rw-r--r--  arch/powerpc/kernel/align.c                |  4
-rw-r--r--  arch/powerpc/kernel/fpu.S                  | 10
-rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c   | 44
-rw-r--r--  arch/powerpc/kvm/fpu.S                     |  8
6 files changed, 26 insertions, 48 deletions
diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h
index c3d4f0518a67..92daae132492 100644
--- a/arch/powerpc/include/asm/kvm_fpu.h
+++ b/arch/powerpc/include/asm/kvm_fpu.h
@@ -82,7 +82,7 @@ FPD_THREE_IN(fmadd)
 FPD_THREE_IN(fnmsub)
 FPD_THREE_IN(fnmadd)
 
-extern void kvm_cvt_fd(u32 *from, u64 *to, u64 *fpscr);
-extern void kvm_cvt_df(u64 *from, u32 *to, u64 *fpscr);
+extern void kvm_cvt_fd(u32 *from, u64 *to);
+extern void kvm_cvt_df(u64 *from, u32 *to);
 
 #endif
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 6c294acac848..0b3fe78be71b 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -154,8 +154,8 @@ extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
-extern void cvt_df(double *from, float *to, struct thread_struct *thread);
+extern void cvt_fd(float *from, double *to);
+extern void cvt_df(double *from, float *to);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index b876e989220b..8184ee97e484 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -889,7 +889,7 @@ int fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
 preempt_disable();
 enable_kernel_fp();
-cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
+cvt_df(&data.dd, (float *)&data.v[4]);
 preempt_enable();
 #else
 return 0;
@@ -933,7 +933,7 @@ int fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
 preempt_disable();
 enable_kernel_fp();
-cvt_fd((float *)&data.v[4], &data.dd, &current->thread);
+cvt_fd((float *)&data.v[4], &data.dd);
 preempt_enable();
 #else
 return 0;
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index fc8f5b14019c..e86c040ae585 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -163,24 +163,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 /*
  * These are used in the alignment trap handler when emulating
  * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
  */
 
 _GLOBAL(cvt_fd)
-	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
-	MTFSF_L(0)
 	lfs	0,0(r3)
 	stfd	0,0(r4)
-	mffs	0
-	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
 	blr
 
 _GLOBAL(cvt_df)
-	lfd	0,THREAD_FPSCR(r5)	/* load up fpscr value */
-	MTFSF_L(0)
 	lfd	0,0(r3)
 	stfs	0,0(r4)
-	mffs	0
-	stfd	0,THREAD_FPSCR(r5)	/* save new fpscr value */
 	blr
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 474f2e24050a..35a701f3ece4 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -159,7 +159,7 @@
 
 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 {
-kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt], &vcpu->arch.fpscr);
+kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]);
 }
 
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
@@ -204,7 +204,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 /* put in registers */
 switch (ls_type) {
 case FPU_LS_SINGLE:
-kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
+kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]);
 vcpu->arch.qpr[rs] = *((u32*)tmp);
 break;
 case FPU_LS_DOUBLE:
@@ -230,7 +230,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 switch (ls_type) {
 case FPU_LS_SINGLE:
-kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp, &vcpu->arch.fpscr);
+kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp);
 val = *((u32*)tmp);
 len = sizeof(u32);
 break;
@@ -296,7 +296,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 emulated = EMULATE_DONE;
 
 /* put in registers */
-kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
+kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]);
 vcpu->arch.qpr[rs] = tmp[1];
 
 dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@@ -314,7 +314,7 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 u32 tmp[2];
 int len = w ? sizeof(u32) : sizeof(u64);
 
-kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0], &vcpu->arch.fpscr);
+kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]);
 tmp[1] = vcpu->arch.qpr[rs];
 
 r = kvmppc_st(vcpu, &addr, len, tmp, true);
@@ -516,9 +516,9 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 WARN_ON(rc);
 
 /* PS0 */
-kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
-kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
-kvm_cvt_df(&fpr[reg_in3], &ps0_in3, &vcpu->arch.fpscr);
+kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
+kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
+kvm_cvt_df(&fpr[reg_in3], &ps0_in3);
 
 if (scalar & SCALAR_LOW)
 ps0_in2 = qpr[reg_in2];
@@ -529,7 +529,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 ps0_in1, ps0_in2, ps0_in3, ps0_out);
 
 if (!(scalar & SCALAR_NO_PS0))
-kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
+kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
 
 /* PS1 */
 ps1_in1 = qpr[reg_in1];
@@ -566,12 +566,12 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 WARN_ON(rc);
 
 /* PS0 */
-kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
+kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
 
 if (scalar & SCALAR_LOW)
 ps0_in2 = qpr[reg_in2];
 else
-kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
+kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
 
 func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
 
@@ -579,7 +579,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
 ps0_in1, ps0_in2, ps0_out);
 
-kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
+kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
 }
 
 /* PS1 */
@@ -615,13 +615,13 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
 WARN_ON(rc);
 
 /* PS0 */
-kvm_cvt_df(&fpr[reg_in], &ps0_in, &vcpu->arch.fpscr);
+kvm_cvt_df(&fpr[reg_in], &ps0_in);
 func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
 
 dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
 ps0_in, ps0_out);
 
-kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
+kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
 
 /* PS1 */
 ps1_in = qpr[reg_in];
@@ -671,7 +671,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 #ifdef DEBUG
 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
 u32 f;
-kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
+kvm_cvt_df(&vcpu->arch.fpr[i], &f);
 dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
 i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
 }
@@ -796,8 +796,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
 kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
-           &vcpu->arch.qpr[ax_rd],
-           &vcpu->arch.fpscr);
+           &vcpu->arch.qpr[ax_rd]);
 break;
 case OP_4X_PS_MERGE01:
 WARN_ON(rcomp);
@@ -808,19 +807,16 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 WARN_ON(rcomp);
 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
-           &vcpu->arch.fpr[ax_rd],
-           &vcpu->arch.fpscr);
+           &vcpu->arch.fpr[ax_rd]);
 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
 kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
-           &vcpu->arch.qpr[ax_rd],
-           &vcpu->arch.fpscr);
+           &vcpu->arch.qpr[ax_rd]);
 break;
 case OP_4X_PS_MERGE11:
 WARN_ON(rcomp);
 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
-           &vcpu->arch.fpr[ax_rd],
-           &vcpu->arch.fpscr);
+           &vcpu->arch.fpr[ax_rd]);
 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 break;
 }
@@ -1255,7 +1251,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 #ifdef DEBUG
 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
 u32 f;
-kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
+kvm_cvt_df(&vcpu->arch.fpr[i], &f);
 dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
 }
 #endif
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
index cb34bbe16113..bf68d597549e 100644
--- a/arch/powerpc/kvm/fpu.S
+++ b/arch/powerpc/kvm/fpu.S
@@ -273,19 +273,11 @@ FPD_THREE_IN(fnmsub)
 FPD_THREE_IN(fnmadd)
 
 _GLOBAL(kvm_cvt_fd)
-	lfd	0,0(r5)	/* load up fpscr value */
-	MTFSF_L(0)
 	lfs	0,0(r3)
 	stfd	0,0(r4)
-	mffs	0
-	stfd	0,0(r5)	/* save new fpscr value */
 	blr
 
 _GLOBAL(kvm_cvt_df)
-	lfd	0,0(r5)	/* load up fpscr value */
-	MTFSF_L(0)
 	lfd	0,0(r3)
 	stfs	0,0(r4)
-	mffs	0
-	stfd	0,0(r5)	/* save new fpscr value */
 	blr