 arch/powerpc/kvm/booke.c   | 3 ++-
 arch/x86/kernel/kvmclock.c | 1 +
 arch/x86/kvm/x86.c         | 5 ++---
 3 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 5cd7ad0c1176..1a1b51189773 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		ret = s;
 		goto out;
 	}
-	kvmppc_lazy_ee_enable();
 
 	kvm_guest_enter();
 
@@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
#endif
 
+	kvmppc_lazy_ee_enable();
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
 	/* No need for kvm_guest_exit. It's done in handle_exit.
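
Note: these two hunks defer kvmppc_lazy_ee_enable() from before kvm_guest_enter() to immediately before guest entry. A condensed sketch of the resulting control flow; the function name is hypothetical, and the signal/error paths and FP #ifdef of the real kvmppc_vcpu_run() are elided:

static int vcpu_run_order_sketch(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	kvm_guest_enter();		/* start guest-time accounting */
	kvmppc_load_guest_fp(vcpu);	/* hand the guest its FP state */

	/*
	 * Lazy EE is now enabled as the final step, so no other host
	 * code runs between enabling it and entering the guest.
	 */
	kvmppc_lazy_ee_enable();

	return __kvmppc_vcpu_run(kvm_run, vcpu);
	/* kvm_guest_exit() happens later, in handle_exit(). */
}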
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d2c381280e3c..3dd37ebd591b 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -242,6 +242,7 @@ void __init kvmclock_init(void)
 	if (!mem)
 		return;
 	hv_clock = __va(mem);
+	memset(hv_clock, 0, size);
 
 	if (kvm_register_clock("boot clock")) {
 		hv_clock = NULL;
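
Note: the memblock area backing hv_clock is not guaranteed to be zeroed, so without the memset the shared pvclock structures can start out with garbage in their version fields before the hypervisor ever writes them. A simplified, paraphrased sketch of the pvclock read loop shows why that matters; read_cycles_sketch() is a hypothetical stand-in for the real cycle computation:

/* src points at a pvclock_vcpu_time_info shared with the hypervisor. */
do {
	version = src->version;			/* snapshot the update counter */
	rmb();					/* order field reads after snapshot */
	cycles = read_cycles_sketch(src);
	rmb();
	/* retry while an update is in flight (odd) or we raced (changed) */
} while ((src->version & 1) || version != src->version);

A stale odd value left in uninitialized memory could keep a reader spinning here, which the memset prevents.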
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 094b5d96ab14..e8ba99c34180 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 	if (index != XCR_XFEATURE_ENABLED_MASK)
 		return 1;
 	xcr0 = xcr;
-	if (kvm_x86_ops->get_cpl(vcpu) != 0)
-		return 1;
 	if (!(xcr0 & XSTATE_FP))
 		return 1;
 	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-	if (__kvm_set_xcr(vcpu, index, xcr)) {
+	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+	    __kvm_set_xcr(vcpu, index, xcr)) {
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
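
Note: this hunk hoists the privilege check out of the value-validation helper. __kvm_set_xcr() is reachable both from the guest (XSETBV interception via kvm_set_xcr()) and from host userspace (the KVM_SET_XCRS ioctl path), and only the guest-initiated path should be gated on the vcpu's CPL. A sketch of the resulting wrapper, with the trailing return reconstructed from context:

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	/*
	 * XSETBV is privileged for the guest, so reject it at CPL > 0;
	 * host-initiated sets call __kvm_set_xcr() directly and are no
	 * longer affected by whatever CPL the guest happens to be at.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}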
