author    Paul Mackerras <paulus@samba.org>    2013-10-15 05:43:03 -0400
committer Alexander Graf <agraf@suse.de>       2014-01-09 04:15:02 -0500
commit    99dae3bad28d8fdd32b7bfdd5e2ec7bb2d4d019d
tree      524617ede36278f57f7a06be1f6ea88987f520c0 /arch/powerpc/kvm
parent    efff19122315f1431f6b02cd2983b15f5d3957bd
KVM: PPC: Load/save FP/VMX/VSX state directly to/from vcpu struct
Now that we have the vcpu floating-point and vector state stored in the
same type of struct as the main kernel uses, we can load that state
directly from the vcpu struct instead of having extra copies to/from the
thread_struct. Similarly, when the guest state needs to be saved, we can
have it saved directly to the vcpu struct by setting the
current->thread.fp_save_area and current->thread.vr_save_area pointers.
That also means that we don't need to back up and restore userspace's
FP/vector state. This all makes the code simpler and faster.

Note that it's not necessary to save or modify current->thread.fpexc_mode,
since nothing in KVM uses or is affected by its value. Nor is it necessary
to touch used_vr or used_vsr.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
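For context, the redirection mechanism this commit relies on works roughly
as follows: giveup_fpu()/giveup_altivec() save the live register state
through the thread's fp_save_area/vr_save_area pointer when it is non-NULL,
and into thread.fp_state/vr_state otherwise. The sketch below is a
simplified, user-space illustration of that dispatch only; the struct
layout and the helpers (thread_sketch, save_fp_registers,
giveup_fpu_sketch) are made-up stand-ins, not the kernel's actual code.

/* Simplified stand-ins; the real definitions live in asm/processor.h. */
#include <stdint.h>
#include <string.h>

struct thread_fp_state {
	uint64_t fpr[32][2];	/* FP/VSX register image */
	uint64_t fpscr;
};

struct thread_sketch {
	struct thread_fp_state fp_state;	/* default save target */
	struct thread_fp_state *fp_save_area;	/* if non-NULL, save here instead */
};

/* Stand-in for the low-level register dump performed by giveup_fpu(). */
static void save_fp_registers(struct thread_fp_state *dst,
			      const struct thread_fp_state *live)
{
	memcpy(dst, live, sizeof(*dst));
}

/*
 * giveup_fpu() (sketch): the destination is the vcpu's FP area when KVM
 * has pointed fp_save_area at vcpu->arch.fp, the thread_struct otherwise.
 */
static void giveup_fpu_sketch(struct thread_sketch *t,
			      const struct thread_fp_state *live)
{
	struct thread_fp_state *dst =
		t->fp_save_area ? t->fp_save_area : &t->fp_state;
	save_fp_registers(dst, live);
}

With that in place, kvmppc_handle_ext() below only has to point
fp_save_area/vr_save_area at vcpu->arch.fp/vcpu->arch.vr after loading the
guest state, and kvmppc_giveup_ext() resets them to NULL once the guest
state has been saved.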
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c  72
-rw-r--r--  arch/powerpc/kvm/booke.c      16
-rw-r--r--  arch/powerpc/kvm/booke.h       4
3 files changed, 19 insertions(+), 73 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 2bb425b22461..aedba681bb94 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -567,16 +567,16 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 		 * both the traditional FP registers and the added VSX
 		 * registers into thread.fp_state.fpr[].
 		 */
-		if (current->thread.regs->msr & MSR_FP)
+		if (t->regs->msr & MSR_FP)
 			giveup_fpu(current);
-		vcpu->arch.fp = t->fp_state;
+		t->fp_save_area = NULL;
 	}
 
 #ifdef CONFIG_ALTIVEC
 	if (msr & MSR_VEC) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		vcpu->arch.vr = t->vr_state;
+		t->vr_save_area = NULL;
 	}
 #endif
 
@@ -661,22 +661,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #endif
 
 	if (msr & MSR_FP) {
-		t->fp_state = vcpu->arch.fp;
-		t->fpexc_mode = 0;
 		enable_kernel_fp();
-		load_fp_state(&t->fp_state);
+		load_fp_state(&vcpu->arch.fp);
+		t->fp_save_area = &vcpu->arch.fp;
 	}
 
 	if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
-		t->vr_state = vcpu->arch.vr;
-		t->vrsave = -1;
 		enable_kernel_altivec();
-		load_vr_state(&t->vr_state);
+		load_vr_state(&vcpu->arch.vr);
+		t->vr_save_area = &vcpu->arch.vr;
 #endif
 	}
 
-	current->thread.regs->msr |= msr;
+	t->regs->msr |= msr;
 	vcpu->arch.guest_owned_ext |= msr;
 	kvmppc_recalc_shadow_msr(vcpu);
 
@@ -697,12 +695,12 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
 
 	if (lost_ext & MSR_FP) {
 		enable_kernel_fp();
-		load_fp_state(&current->thread.fp_state);
+		load_fp_state(&vcpu->arch.fp);
 	}
 #ifdef CONFIG_ALTIVEC
 	if (lost_ext & MSR_VEC) {
 		enable_kernel_altivec();
-		load_vr_state(&current->thread.vr_state);
+		load_vr_state(&vcpu->arch.vr);
 	}
 #endif
 	current->thread.regs->msr |= lost_ext;
@@ -1204,17 +1202,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
-	struct thread_fp_state fp;
-	int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-	struct thread_vr_state vr;
 	unsigned long uninitialized_var(vrsave);
-	int used_vr;
 #endif
-#ifdef CONFIG_VSX
-	int used_vsr;
-#endif
-	ulong ext_msr;
 
 	/* Check if we can run the vcpu at all */
 	if (!vcpu->arch.sane) {
@@ -1236,33 +1226,22 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	/* Save FPU state in stack */
+	/* Save FPU state in thread_struct */
 	if (current->thread.regs->msr & MSR_FP)
 		giveup_fpu(current);
-	fp = current->thread.fp_state;
-	fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
-	/* Save Altivec state in stack */
-	used_vr = current->thread.used_vr;
-	if (used_vr) {
-		if (current->thread.regs->msr & MSR_VEC)
-			giveup_altivec(current);
-		vr = current->thread.vr_state;
-		vrsave = current->thread.vrsave;
-	}
+	/* Save Altivec state in thread_struct */
+	if (current->thread.regs->msr & MSR_VEC)
+		giveup_altivec(current);
 #endif
 
 #ifdef CONFIG_VSX
-	/* Save VSX state in stack */
-	used_vsr = current->thread.used_vsr;
-	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
+	/* Save VSX state in thread_struct */
+	if (current->thread.regs->msr & MSR_VSX)
 		__giveup_vsx(current);
 #endif
 
-	/* Remember the MSR with disabled extensions */
-	ext_msr = current->thread.regs->msr;
-
 	/* Preload FPU if it's enabled */
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -1277,25 +1256,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* Make sure we save the guest FPU/Altivec/VSX state */
 	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
 
-	current->thread.regs->msr = ext_msr;
-
-	/* Restore FPU/VSX state from stack */
-	current->thread.fp_state = fp;
-	current->thread.fpexc_mode = fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
-	/* Restore Altivec state from stack */
-	if (used_vr && current->thread.used_vr) {
-		current->thread.vr_state = vr;
-		current->thread.vrsave = vrsave;
-	}
-	current->thread.used_vr = used_vr;
-#endif
-
-#ifdef CONFIG_VSX
-	current->thread.used_vsr = used_vsr;
-#endif
-
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0033465ecc3f..a983ccaf3cce 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -682,10 +682,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret, s;
 	struct thread_struct thread;
-#ifdef CONFIG_PPC_FPU
-	struct thread_fp_state fp;
-	int fpexc_mode;
-#endif
 
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -703,11 +699,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_FPU
 	/* Save userspace FPU state in stack */
 	enable_kernel_fp();
-	fp = current->thread.fp_state;
-	fpexc_mode = current->thread.fpexc_mode;
-
-	/* Restore guest FPU state to thread */
-	current->thread.fp_state = vcpu->arch.fp;
 
 	/*
 	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -741,13 +732,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_save_guest_fp(vcpu);
 
 	vcpu->fpu_active = 0;
-
-	/* Save guest FPU state from thread */
-	vcpu->arch.fp = current->thread.fp_state;
-
-	/* Restore userspace FPU state from stack */
-	current->thread.fp_state = fp;
-	current->thread.fpexc_mode = fpexc_mode;
 #endif
 
 out:
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index fe59f225327f..b632cd35919b 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -137,7 +137,8 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_FPU
 	if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
 		enable_kernel_fp();
-		load_fp_state(&current->thread.fp_state);
+		load_fp_state(&vcpu->arch.fp);
+		current->thread.fp_save_area = &vcpu->arch.fp;
 		current->thread.regs->msr |= MSR_FP;
 	}
 #endif
@@ -152,6 +153,7 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_FPU
 	if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
 		giveup_fpu(current);
+	current->thread.fp_save_area = NULL;
 #endif
 }
 