author	Paul Mackerras <paulus@samba.org>	2013-10-15 05:43:02 -0400
committer	Alexander Graf <agraf@suse.de>	2014-01-09 04:15:00 -0500
commit	efff19122315f1431f6b02cd2983b15f5d3957bd (patch)
tree	7c0e937099931df69b9efb510a9030c419767db9 /arch/powerpc
parent	09548fdaf32ce77a68e7f9a8a3098c1306b04858 (diff)
KVM: PPC: Store FP/VSX/VMX state in thread_fp/vr_state structures
This uses struct thread_fp_state and struct thread_vr_state to store
the floating-point, VMX/Altivec and VSX state, rather than flat arrays.
This makes transferring the state to/from the thread_struct simpler
and allows us to unify the get/set_one_reg implementations for the
VSX registers.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
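[Editor's note: for orientation, a minimal sketch of the layout being
adopted, assuming the thread_fp_state / thread_vr_state definitions in
arch/powerpc/include/asm/processor.h of this era (alignment attributes
elided). TS_FPRWIDTH is 2 when VSX is configured, so each row of fpr[]
also holds the second doubleword of the corresponding VSX register, and
TS_FPROFFSET selects the doubleword carrying the scalar FP value:

	/* sketch only, not part of the patch */
	struct thread_fp_state {
		u64	fpr[32][TS_FPRWIDTH];	/* TS_FPRWIDTH == 2 with VSX */
		u64	fpscr;
	};

	struct thread_vr_state {
		vector128	vr[32];
		vector128	vscr;
	};

	/* FP register i lives in doubleword TS_FPROFFSET of row i,
	 * which is what the new VCPU_FPR() accessor encodes: */
	#define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET]

Because the vcpu now embeds the same structures the thread uses, saving
or restoring guest state becomes a single struct assignment instead of a
register-by-register copy.]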
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	12
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	11
-rw-r--r--	arch/powerpc/kvm/book3s.c	38
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	42
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	4
-rw-r--r--	arch/powerpc/kvm/book3s_paired_singles.c	169
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c	63
-rw-r--r--	arch/powerpc/kvm/booke.c	8
-rw-r--r--	arch/powerpc/kvm/powerpc.c	4
9 files changed, 131 insertions, 220 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 237d1d25b448..3ca0b430eaee 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -410,8 +410,7 @@ struct kvm_vcpu_arch {
 
 	ulong gpr[32];
 
-	u64 fpr[32];
-	u64 fpscr;
+	struct thread_fp_state fp;
 
 #ifdef CONFIG_SPE
 	ulong evr[32];
@@ -420,12 +419,7 @@ struct kvm_vcpu_arch {
 	u64 acc;
 #endif
 #ifdef CONFIG_ALTIVEC
-	vector128 vr[32];
-	vector128 vscr;
-#endif
-
-#ifdef CONFIG_VSX
-	u64 vsr[64];
+	struct thread_vr_state vr;
 #endif
 
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -619,6 +613,8 @@ struct kvm_vcpu_arch {
 #endif
 };
 
+#define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+
 /* Values for vcpu->arch.state */
 #define KVMPPC_VCPU_NOTREADY	0
 #define KVMPPC_VCPU_RUNNABLE	1
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2ea5cc033ec8..8403f9031d93 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -425,14 +425,11 @@ int main(void)
 	DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
-	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
-	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
+	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fp.fpscr));
 #ifdef CONFIG_ALTIVEC
-	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
-	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
-#endif
-#ifdef CONFIG_VSX
-	DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
+	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vr.vscr));
 #endif
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 48cf91bc862f..94e597e6f15c 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -577,10 +577,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = reg->id - KVM_REG_PPC_FPR0;
-		val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
+		val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
 		break;
 	case KVM_REG_PPC_FPSCR:
-		val = get_reg_val(reg->id, vcpu->arch.fpscr);
+		val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
 		break;
 #ifdef CONFIG_ALTIVEC
 	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -588,19 +588,30 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 			r = -ENXIO;
 			break;
 		}
-		val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
+		val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
 		break;
 	case KVM_REG_PPC_VSCR:
 		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 			r = -ENXIO;
 			break;
 		}
-		val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
+		val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
 		break;
 	case KVM_REG_PPC_VRSAVE:
 		val = get_reg_val(reg->id, vcpu->arch.vrsave);
 		break;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+		if (cpu_has_feature(CPU_FTR_VSX)) {
+			long int i = reg->id - KVM_REG_PPC_VSR0;
+			val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
+			val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
+		} else {
+			r = -ENXIO;
+		}
+		break;
+#endif /* CONFIG_VSX */
 	case KVM_REG_PPC_DEBUG_INST: {
 		u32 opcode = INS_TW;
 		r = copy_to_user((u32 __user *)(long)reg->addr,
@@ -656,10 +667,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = reg->id - KVM_REG_PPC_FPR0;
-		vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
+		VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
 		break;
 	case KVM_REG_PPC_FPSCR:
-		vcpu->arch.fpscr = set_reg_val(reg->id, val);
+		vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
 		break;
 #ifdef CONFIG_ALTIVEC
 	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -667,14 +678,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 			r = -ENXIO;
 			break;
 		}
-		vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+		vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
 		break;
 	case KVM_REG_PPC_VSCR:
 		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 			r = -ENXIO;
 			break;
 		}
-		vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
+		vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
 		break;
 	case KVM_REG_PPC_VRSAVE:
 		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
@@ -684,6 +695,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		vcpu->arch.vrsave = set_reg_val(reg->id, val);
 		break;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+		if (cpu_has_feature(CPU_FTR_VSX)) {
+			long int i = reg->id - KVM_REG_PPC_VSR0;
+			vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
+			vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
+		} else {
+			r = -ENXIO;
+		}
+		break;
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_KVM_XICS
 	case KVM_REG_PPC_ICP_STATE:
 		if (!vcpu->arch.icp) {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 088a6e54c998..461f55566167 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -811,27 +811,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_SDAR:
 		*val = get_reg_val(id, vcpu->arch.sdar);
 		break;
-#ifdef CONFIG_VSX
-	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-		if (cpu_has_feature(CPU_FTR_VSX)) {
-			/* VSX => FP reg i is stored in arch.vsr[2*i] */
-			long int i = id - KVM_REG_PPC_FPR0;
-			*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
-		} else {
-			/* let generic code handle it */
-			r = -EINVAL;
-		}
-		break;
-	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
-		if (cpu_has_feature(CPU_FTR_VSX)) {
-			long int i = id - KVM_REG_PPC_VSR0;
-			val->vsxval[0] = vcpu->arch.vsr[2 * i];
-			val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
-		} else {
-			r = -ENXIO;
-		}
-		break;
-#endif /* CONFIG_VSX */
 	case KVM_REG_PPC_VPA_ADDR:
 		spin_lock(&vcpu->arch.vpa_update_lock);
 		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
@@ -914,27 +893,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_SDAR:
 		vcpu->arch.sdar = set_reg_val(id, *val);
 		break;
-#ifdef CONFIG_VSX
-	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-		if (cpu_has_feature(CPU_FTR_VSX)) {
-			/* VSX => FP reg i is stored in arch.vsr[2*i] */
-			long int i = id - KVM_REG_PPC_FPR0;
-			vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
-		} else {
-			/* let generic code handle it */
-			r = -EINVAL;
-		}
-		break;
-	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
-		if (cpu_has_feature(CPU_FTR_VSX)) {
-			long int i = id - KVM_REG_PPC_VSR0;
-			vcpu->arch.vsr[2 * i] = val->vsxval[0];
-			vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
-		} else {
-			r = -ENXIO;
-		}
-		break;
-#endif /* CONFIG_VSX */
 	case KVM_REG_PPC_VPA_ADDR:
 		addr = set_reg_val(id, *val);
 		r = -EINVAL;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index d5ddc2d10748..47fd536fb13b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1889,7 +1889,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 BEGIN_FTR_SECTION
 	reg = 0
 	.rept	32
-	li	r6,reg*16+VCPU_VSRS
+	li	r6,reg*16+VCPU_FPRS
 	STXVD2X(reg,R6,R3)
 	reg = reg + 1
 	.endr
@@ -1951,7 +1951,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 BEGIN_FTR_SECTION
 	reg = 0
 	.rept	32
-	li	r7,reg*16+VCPU_VSRS
+	li	r7,reg*16+VCPU_FPRS
 	LXVD2X(reg,R7,R4)
 	reg = reg + 1
 	.endr
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a59a25a13218..c1abd95063f4 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -160,7 +160,7 @@
 
 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 {
-	kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]);
+	kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
 }
 
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
@@ -207,11 +207,11 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* put in registers */
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]);
+		kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
 		vcpu->arch.qpr[rs] = *((u32*)tmp);
 		break;
 	case FPU_LS_DOUBLE:
-		vcpu->arch.fpr[rs] = *((u64*)tmp);
+		VCPU_FPR(vcpu, rs) = *((u64*)tmp);
 		break;
 	}
 
@@ -233,18 +233,18 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	switch (ls_type) {
 	case FPU_LS_SINGLE:
-		kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp);
+		kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
 		val = *((u32*)tmp);
 		len = sizeof(u32);
 		break;
 	case FPU_LS_SINGLE_LOW:
-		*((u32*)tmp) = vcpu->arch.fpr[rs];
-		val = vcpu->arch.fpr[rs] & 0xffffffff;
+		*((u32*)tmp) = VCPU_FPR(vcpu, rs);
+		val = VCPU_FPR(vcpu, rs) & 0xffffffff;
 		len = sizeof(u32);
 		break;
 	case FPU_LS_DOUBLE:
-		*((u64*)tmp) = vcpu->arch.fpr[rs];
-		val = vcpu->arch.fpr[rs];
+		*((u64*)tmp) = VCPU_FPR(vcpu, rs);
+		val = VCPU_FPR(vcpu, rs);
 		len = sizeof(u64);
 		break;
 	default:
@@ -301,7 +301,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	emulated = EMULATE_DONE;
 
 	/* put in registers */
-	kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]);
+	kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
 	vcpu->arch.qpr[rs] = tmp[1];
 
 	dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@@ -319,7 +319,7 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	u32 tmp[2];
 	int len = w ? sizeof(u32) : sizeof(u64);
 
-	kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]);
+	kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
 	tmp[1] = vcpu->arch.qpr[rs];
 
 	r = kvmppc_st(vcpu, &addr, len, tmp, true);
@@ -512,7 +512,6 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 						  u32 *src2, u32 *src3))
 {
 	u32 *qpr = vcpu->arch.qpr;
-	u64 *fpr = vcpu->arch.fpr;
 	u32 ps0_out;
 	u32 ps0_in1, ps0_in2, ps0_in3;
 	u32 ps1_in1, ps1_in2, ps1_in3;
@@ -521,20 +520,20 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 	WARN_ON(rc);
 
 	/* PS0 */
-	kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
-	kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
-	kvm_cvt_df(&fpr[reg_in3], &ps0_in3);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);
 
 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];
 
-	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
+	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
 
 	dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps0_in1, ps0_in2, ps0_in3, ps0_out);
 
 	if (!(scalar & SCALAR_NO_PS0))
-		kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
+		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
 
 	/* PS1 */
 	ps1_in1 = qpr[reg_in1];
@@ -545,7 +544,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
 		ps1_in2 = ps0_in2;
 
 	if (!(scalar & SCALAR_NO_PS1))
-		func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
+		func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
 
 	dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
 			  ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
@@ -561,7 +560,6 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 						 u32 *src2))
 {
 	u32 *qpr = vcpu->arch.qpr;
-	u64 *fpr = vcpu->arch.fpr;
 	u32 ps0_out;
 	u32 ps0_in1, ps0_in2;
 	u32 ps1_out;
@@ -571,20 +569,20 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 	WARN_ON(rc);
 
 	/* PS0 */
-	kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
 
 	if (scalar & SCALAR_LOW)
 		ps0_in2 = qpr[reg_in2];
 	else
-		kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
+		kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
 
-	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
+	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
 
 	if (!(scalar & SCALAR_NO_PS0)) {
 		dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
 				  ps0_in1, ps0_in2, ps0_out);
 
-		kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
+		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
 	}
 
 	/* PS1 */
@@ -594,7 +592,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
 	if (scalar & SCALAR_HIGH)
 		ps1_in2 = ps0_in2;
 
-	func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
+	func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
 
 	if (!(scalar & SCALAR_NO_PS1)) {
 		qpr[reg_out] = ps1_out;
@@ -612,7 +610,6 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
 						 u32 *dst, u32 *src1))
 {
 	u32 *qpr = vcpu->arch.qpr;
-	u64 *fpr = vcpu->arch.fpr;
 	u32 ps0_out, ps0_in;
 	u32 ps1_in;
 
@@ -620,17 +617,17 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
 	WARN_ON(rc);
 
 	/* PS0 */
-	kvm_cvt_df(&fpr[reg_in], &ps0_in);
-	func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
+	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
+	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);
 
 	dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
 			  ps0_in, ps0_out);
 
-	kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
+	kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
 
 	/* PS1 */
 	ps1_in = qpr[reg_in];
-	func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in);
+	func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);
 
 	dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
 			  ps1_in, qpr[reg_out]);
@@ -649,10 +646,10 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	int ax_rc = inst_get_field(inst, 21, 25);
 	short full_d = inst_get_field(inst, 16, 31);
 
-	u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
-	u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
-	u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
-	u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
+	u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
+	u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
+	u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
+	u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
 
 	bool rcomp = (inst & 1) ? true : false;
 	u32 cr = kvmppc_get_cr(vcpu);
@@ -674,11 +671,11 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Do we need to clear FE0 / FE1 here? Don't think so. */
 
 #ifdef DEBUG
-	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
 		u32 f;
-		kvm_cvt_df(&vcpu->arch.fpr[i], &f);
+		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
 		dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
-			i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
+			i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
 	}
 #endif
 
@@ -764,8 +761,8 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 		}
 		case OP_4X_PS_NEG:
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
-			vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
+			VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			vcpu->arch.qpr[ax_rd] ^= 0x80000000;
 			break;
@@ -775,7 +772,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 		case OP_4X_PS_MR:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			break;
 		case OP_4X_PS_CMPO1:
@@ -784,44 +781,44 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 		case OP_4X_PS_NABS:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
-			vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
+			VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			vcpu->arch.qpr[ax_rd] |= 0x80000000;
 			break;
 		case OP_4X_PS_ABS:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
-			vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
+			VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			vcpu->arch.qpr[ax_rd] &= ~0x80000000;
 			break;
 		case OP_4X_PS_MERGE00:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
-			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
-			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
+			/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
+			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
 				   &vcpu->arch.qpr[ax_rd]);
 			break;
 		case OP_4X_PS_MERGE01:
 			WARN_ON(rcomp);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			break;
 		case OP_4X_PS_MERGE10:
 			WARN_ON(rcomp);
-			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+			/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
 			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
-				   &vcpu->arch.fpr[ax_rd]);
-			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
-			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+				   &VCPU_FPR(vcpu, ax_rd));
+			/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
+			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
 				   &vcpu->arch.qpr[ax_rd]);
 			break;
 		case OP_4X_PS_MERGE11:
 			WARN_ON(rcomp);
-			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+			/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
 			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
-				   &vcpu->arch.fpr[ax_rd]);
+				   &VCPU_FPR(vcpu, ax_rd));
 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
 			break;
 		}
@@ -856,7 +853,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case OP_4A_PS_SUM1:
 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
 					ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
-			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
+			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
 			break;
 		case OP_4A_PS_SUM0:
 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
@@ -1106,45 +1103,45 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case 59:
 		switch (inst_get_field(inst, 21, 30)) {
 		case OP_59_FADDS:
-			fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FSUBS:
-			fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FDIVS:
-			fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FRES:
-			fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FRSQRTES:
-			fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		}
 		switch (inst_get_field(inst, 26, 30)) {
 		case OP_59_FMULS:
-			fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+			fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FMSUBS:
-			fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FMADDS:
-			fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FNMSUBS:
-			fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_59_FNMADDS:
-			fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		}
@@ -1159,12 +1156,12 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 		case OP_63_MFFS:
 			/* XXX missing CR */
-			*fpr_d = vcpu->arch.fpscr;
+			*fpr_d = vcpu->arch.fp.fpscr;
 			break;
 		case OP_63_MTFSF:
 			/* XXX missing fm bits */
 			/* XXX missing CR */
-			vcpu->arch.fpscr = *fpr_b;
+			vcpu->arch.fp.fpscr = *fpr_b;
 			break;
 		case OP_63_FCMPU:
 		{
@@ -1172,7 +1169,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			u32 cr0_mask = 0xf0000000;
 			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
 
-			fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+			fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
 			cr &= ~(cr0_mask >> cr_shift);
 			cr |= (cr & cr0_mask) >> cr_shift;
 			break;
@@ -1183,40 +1180,40 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			u32 cr0_mask = 0xf0000000;
 			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
 
-			fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+			fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
 			cr &= ~(cr0_mask >> cr_shift);
 			cr |= (cr & cr0_mask) >> cr_shift;
 			break;
 		}
 		case OP_63_FNEG:
-			fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			break;
 		case OP_63_FMR:
 			*fpr_d = *fpr_b;
 			break;
 		case OP_63_FABS:
-			fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			break;
 		case OP_63_FCPSGN:
-			fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			break;
 		case OP_63_FDIV:
-			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			break;
 		case OP_63_FADD:
-			fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			break;
 		case OP_63_FSUB:
-			fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+			fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
 			break;
 		case OP_63_FCTIW:
-			fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			break;
 		case OP_63_FCTIWZ:
-			fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			break;
 		case OP_63_FRSP:
-			fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			kvmppc_sync_qpr(vcpu, ax_rd);
 			break;
 		case OP_63_FRSQRTE:
@@ -1224,39 +1221,39 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			double one = 1.0f;
 
 			/* fD = sqrt(fB) */
-			fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+			fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
 			/* fD = 1.0f / fD */
-			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
+			fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
 			break;
 		}
 		}
 		switch (inst_get_field(inst, 26, 30)) {
 		case OP_63_FMUL:
-			fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+			fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
 			break;
 		case OP_63_FSEL:
-			fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		case OP_63_FMSUB:
-			fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		case OP_63_FMADD:
-			fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		case OP_63_FNMSUB:
-			fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		case OP_63_FNMADD:
-			fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+			fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
 			break;
 		}
 		break;
 	}
 
 #ifdef DEBUG
-	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
 		u32 f;
-		kvm_cvt_df(&vcpu->arch.fpr[i], &f);
+		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
 		dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
 	}
 #endif
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d63a91f825d3..2bb425b22461 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -545,12 +545,6 @@ static inline int get_fpr_index(int i)
 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 {
 	struct thread_struct *t = &current->thread;
-	u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
-	u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
-	u64 *thread_fpr = &t->fp_state.fpr[0][0];
-	int i;
 
 	/*
 	 * VSX instructions can access FP and vector registers, so if
@@ -575,24 +569,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 		 */
 		if (current->thread.regs->msr & MSR_FP)
 			giveup_fpu(current);
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
-			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
-
-		vcpu->arch.fpscr = t->fp_state.fpscr;
-
-#ifdef CONFIG_VSX
-		if (cpu_has_feature(CPU_FTR_VSX))
-			for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
-				vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
+		vcpu->arch.fp = t->fp_state;
 	}
 
 #ifdef CONFIG_ALTIVEC
 	if (msr & MSR_VEC) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
-		vcpu->arch.vscr = t->vr_state.vscr;
+		vcpu->arch.vr = t->vr_state;
 	}
 #endif
 
@@ -640,12 +624,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 			     ulong msr)
 {
 	struct thread_struct *t = &current->thread;
-	u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
-	u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
-	u64 *thread_fpr = &t->fp_state.fpr[0][0];
-	int i;
 
 	/* When we have paired singles, we emulate in software */
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
@@ -683,13 +661,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #endif
 
 	if (msr & MSR_FP) {
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
-			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-#ifdef CONFIG_VSX
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
-			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
-#endif
-		t->fp_state.fpscr = vcpu->arch.fpscr;
+		t->fp_state = vcpu->arch.fp;
 		t->fpexc_mode = 0;
 		enable_kernel_fp();
 		load_fp_state(&t->fp_state);
@@ -697,8 +669,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 
 	if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
-		memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
-		t->vr_state.vscr = vcpu->arch.vscr;
+		t->vr_state = vcpu->arch.vr;
 		t->vrsave = -1;
 		enable_kernel_altivec();
 		load_vr_state(&t->vr_state);
@@ -1118,19 +1089,6 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_HIOR:
 		*val = get_reg_val(id, to_book3s(vcpu)->hior);
 		break;
-#ifdef CONFIG_VSX
-	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
-		long int i = id - KVM_REG_PPC_VSR0;
-
-		if (!cpu_has_feature(CPU_FTR_VSX)) {
-			r = -ENXIO;
-			break;
-		}
-		val->vsxval[0] = vcpu->arch.fpr[i];
-		val->vsxval[1] = vcpu->arch.vsr[i];
-		break;
-	}
-#endif /* CONFIG_VSX */
 	default:
 		r = -EINVAL;
 		break;
@@ -1149,19 +1107,6 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 		to_book3s(vcpu)->hior = set_reg_val(id, *val);
 		to_book3s(vcpu)->hior_explicit = true;
 		break;
-#ifdef CONFIG_VSX
-	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
-		long int i = id - KVM_REG_PPC_VSR0;
-
-		if (!cpu_has_feature(CPU_FTR_VSX)) {
-			r = -ENXIO;
-			break;
-		}
-		vcpu->arch.fpr[i] = val->vsxval[0];
-		vcpu->arch.vsr[i] = val->vsxval[1];
-		break;
-	}
-#endif /* CONFIG_VSX */
 	default:
 		r = -EINVAL;
 		break;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a210b9a..0033465ecc3f 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -707,9 +707,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	fpexc_mode = current->thread.fpexc_mode;
 
 	/* Restore guest FPU state to thread */
-	memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
-	       sizeof(vcpu->arch.fpr));
-	current->thread.fp_state.fpscr = vcpu->arch.fpscr;
+	current->thread.fp_state = vcpu->arch.fp;
 
 	/*
 	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -745,9 +743,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	vcpu->fpu_active = 0;
 
 	/* Save guest FPU state from thread */
-	memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
-	       sizeof(vcpu->arch.fpr));
-	vcpu->arch.fpscr = current->thread.fp_state.fpscr;
+	vcpu->arch.fp = current->thread.fp_state;
 
 	/* Restore userspace FPU state from stack */
 	current->thread.fp_state = fp;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9ae97686e9f4..7ca9e0a80499 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -656,14 +656,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
 		break;
 	case KVM_MMIO_REG_FPR:
-		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
 		break;
 #ifdef CONFIG_PPC_BOOK3S
 	case KVM_MMIO_REG_QPR:
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 	case KVM_MMIO_REG_FQPR:
-		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 #endif