author	Avi Kivity <avi@redhat.com>	2010-06-10 10:02:16 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 03:46:35 -0400
commit	2390218b6aa2eb3784b0a82fa811c19097dc793a (patch)
tree	facf41c6ed5c3d2eafa939e7ab9209cc1594cddb
parent	a83b29c6ad6d6497e569edbc29e556a384cebddd (diff)
KVM: Fix mov cr3 #GP at wrong instruction
On Intel, we call skip_emulated_instruction() even if we injected a #GP,
resulting in the #GP pointing at the wrong address.

Fix by injecting the exception and skipping the instruction at the same
place, so we can do just one or the other.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
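For context: the "one or the other" step lives in the complete_insn_gp() helper that the VMX handler already uses for CR0/CR4 and, with this patch, for CR3 as well. A minimal sketch of what such a helper looks like (not copied from the patched tree, shown only to illustrate the control flow):

static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		/* Setting the control register failed: inject #GP while
		 * RIP still points at the faulting mov, so the guest sees
		 * the exception at the correct instruction. */
		kvm_inject_gp(vcpu, 0);
	else
		/* Success: only now advance past the emulated mov. */
		skip_emulated_instruction(vcpu);
}

Making kvm_set_cr3() return an error code instead of injecting the #GP itself lets each call site decide between these two actions exactly once.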
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/mmu.c              |  2 +-
 arch/x86/kvm/svm.c              |  4 ++--
 arch/x86/kvm/vmx.c              |  4 ++--
 arch/x86/kvm/x86.c              | 10 ++--------
 5 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ea8c319cdffc..c2813d658f3e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -598,7 +598,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 		    bool has_error_code, u32 error_code);
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4706a936e36f..aa98fca03ed7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3203,7 +3203,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	kvm_set_cr3(vcpu, vcpu->arch.cr3);
+	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
 	return 1;
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6d1616d47c54..f7a6fdcf8ef3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1963,7 +1963,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 		svm->vmcb->save.cr3 = hsave->save.cr3;
 		svm->vcpu.arch.cr3 = hsave->save.cr3;
 	} else {
-		kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
+		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
 	}
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
@@ -2086,7 +2086,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
 		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
 	} else
-		kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
+		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
 
 	/* Guest paging mode is active - reset mmu */
 	kvm_mmu_reset_context(&svm->vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f64d65dc38c6..345a35470511 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3185,8 +3185,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 			complete_insn_gp(vcpu, err);
 			return 1;
 		case 3:
-			kvm_set_cr3(vcpu, val);
-			skip_emulated_instruction(vcpu);
+			err = kvm_set_cr3(vcpu, val);
+			complete_insn_gp(vcpu, err);
 			return 1;
 		case 4:
 			err = kvm_set_cr4(vcpu, val);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ed3af15d4404..795999e1ac19 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -565,7 +565,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
-static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
@@ -604,12 +604,6 @@ static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	vcpu->arch.mmu.new_cr3(vcpu);
 	return 0;
 }
-
-void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
-{
-	if (__kvm_set_cr3(vcpu, cr3))
-		kvm_inject_gp(vcpu, 0);
-}
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
 int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
@@ -3726,7 +3720,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
 		vcpu->arch.cr2 = val;
 		break;
 	case 3:
-		res = __kvm_set_cr3(vcpu, val);
+		res = kvm_set_cr3(vcpu, val);
 		break;
 	case 4:
 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));