author      Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>   2011-09-22 05:02:48 -0400
committer   Avi Kivity <avi@redhat.com>                     2011-12-27 04:16:50 -0500
commit      1cb3f3ae5a3855ba430430706da4201ace1d6ec4 (patch)
tree        1ff844904958cf669c86650da4601eb392f14a91 /arch/x86/kvm/mmu.c
parent      d5ae7ce835cc89556dc18e2070e754f026402efa (diff)
KVM: x86: retry non-page-table writing instructions
If the emulation is caused by #PF and the faulting instruction is not a
page-table writing instruction, the VM-exit was caused by shadow page
protection; we can zap the shadow page and retry the instruction directly.

The idea is from Avi.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
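For context, here is a minimal sketch of the retry decision this patch wires into kvm_mmu_page_fault(), assuming only the pieces visible in the diff below (is_mmio_page_fault(), the now-exported kvm_mmu_unprotect_page(), the EMULTYPE_RETRY flag and x86_emulate_instruction()). The wrapper name emulate_after_pf() is hypothetical, for illustration only:

/*
 * Illustration only: a condensed view of what kvm_mmu_page_fault() does
 * after this patch.  The wrapper name is made up; the helpers are the
 * ones added or used in the diff below.
 */
static int emulate_after_pf(struct kvm_vcpu *vcpu, gva_t cr2,
                            void *insn, int insn_len)
{
        /* Default: let the emulator retry non-page-table writing instructions. */
        int emulation_type = EMULTYPE_RETRY;

        /*
         * A fault that matches a cached MMIO access genuinely needs
         * emulation; re-executing the instruction would only fault again,
         * so retry is disabled for that case.
         */
        if (is_mmio_page_fault(vcpu, cr2))
                emulation_type = 0;

        /*
         * With EMULTYPE_RETRY set, the emulator may zap the write-protected
         * shadow page via kvm_mmu_unprotect_page() (now exported and taking
         * mmu_lock itself) and let the guest re-execute the instruction
         * instead of emulating it.
         */
        return x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
}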
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--   arch/x86/kvm/mmu.c   25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 232c5a30ddc8..7a22eb81b4ca 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1998,7 +1998,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
         struct kvm_mmu_page *sp;
         struct hlist_node *node;
@@ -2007,7 +2007,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
         r = 0;
-
+        spin_lock(&kvm->mmu_lock);
         for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
                          sp->role.word);
@@ -2015,8 +2015,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
         }
         kvm_mmu_commit_zap_page(kvm, &invalid_list);
+        spin_unlock(&kvm->mmu_lock);
+
         return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
@@ -3698,9 +3701,8 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
         gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
-        spin_lock(&vcpu->kvm->mmu_lock);
         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-        spin_unlock(&vcpu->kvm->mmu_lock);
+
         return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
@@ -3721,10 +3723,18 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
+static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
+{
+        if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
+                return vcpu_match_mmio_gpa(vcpu, addr);
+
+        return vcpu_match_mmio_gva(vcpu, addr);
+}
+
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
                        void *insn, int insn_len)
 {
-        int r;
+        int r, emulation_type = EMULTYPE_RETRY;
         enum emulation_result er;
 
         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
@@ -3736,7 +3746,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
                 goto out;
         }
 
-        er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
+        if (is_mmio_page_fault(vcpu, cr2))
+                emulation_type = 0;
+
+        er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
         switch (er) {
         case EMULATE_DONE:
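One point worth noting about the locking change above: kvm_mmu_unprotect_page() now takes and drops kvm->mmu_lock itself (and is exported), so it must be called without mmu_lock held; kvm_mmu_unprotect_page_virt() loses its own lock/unlock pair accordingly. A minimal caller sketch under that assumption (the function name unprotect_gva() is hypothetical):

/* Hypothetical caller, loosely mirroring kvm_mmu_unprotect_page_virt() above. */
static int unprotect_gva(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

        /*
         * Must NOT hold kvm->mmu_lock here: kvm_mmu_unprotect_page() now
         * acquires and releases it internally.
         */
        return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}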