author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-09-22 04:53:17 -0400
committer	Avi Kivity <avi@redhat.com>	2011-12-27 04:16:47 -0500
commit	f759e2b4c728cee82e4bc1132d0e41177b79a0b1 (patch)
tree	1b01eb0b666a8990366c601d96e7d1083730ae28 /arch/x86/kvm
parent	51cfe38ea50aa631f58ed8c340ed6f0143c325a8 (diff)
KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write
kvm_mmu_pte_write is unsafe: it needs to allocate pte_list_desc objects when sptes are prefetched, but there is no way to know in advance how many sptes will be prefetched on that path. This means the free pte_list_desc objects in the cache can be used up, triggering the BUG_ON(). In addition, some paths do not fill the cache at all, for example an emulated INS instruction that does not go through the page fault path.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
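As a rough illustration only (not the kernel code), the sketch below models the same pattern in stand-alone C: a fixed-size object cache is topped up best-effort before a locked section that must not allocate, and the consumer checks how many objects remain before prefetching. The names obj_cache, cache_topup, cache_free_objects and cache_alloc are hypothetical stand-ins for the kvm_mmu_memory_cache API; the free-objects check plays the role of rmap_can_add().

/*
 * Simplified, user-space model of the failure mode and the guard described
 * above.  The names used here are illustrative stand-ins, not the kernel's
 * kvm_mmu_memory_cache API.
 */
#include <stdio.h>

#define CACHE_MIN 4                      /* objects guaranteed by a top-up */
#define CACHE_MAX 8

struct obj_cache {
        int nobjs;
        void *objs[CACHE_MAX];
};

/* Best-effort refill, done before entering the "locked" section. */
static void cache_topup(struct obj_cache *c)
{
        static int storage[CACHE_MAX];   /* stand-in for kmem_cache objects */

        while (c->nobjs < CACHE_MIN) {
                c->objs[c->nobjs] = &storage[c->nobjs];
                c->nobjs++;
        }
}

/* Mirrors the idea of mmu_memory_cache_free_objects(): objects left. */
static int cache_free_objects(struct obj_cache *c)
{
        return c->nobjs;
}

/* The locked path cannot allocate, so an empty cache is a hard failure. */
static void *cache_alloc(struct obj_cache *c)
{
        if (c->nobjs == 0) {
                fprintf(stderr, "cache empty: the BUG_ON() case\n");
                return NULL;
        }
        return c->objs[--c->nobjs];
}

int main(void)
{
        struct obj_cache c = { 0 };
        int prefetched = 0;

        cache_topup(&c);

        /*
         * Prefetch an unpredictable number of entries.  Without the
         * cache_free_objects() test the fifth cache_alloc() would find the
         * cache empty; with it, prefetch simply stops early, which is the
         * rmap_can_add() idea in the patch.
         */
        while (prefetched < CACHE_MIN + 2 && cache_free_objects(&c)) {
                cache_alloc(&c);
                prefetched++;
        }

        printf("prefetched %d entries, %d objects left\n",
               prefetched, cache_free_objects(&c));
        return 0;
}

The point of the design, visible in the diff below, is that mmu_topup_memory_caches() is called before mmu_lock is taken and its return value is deliberately ignored: if the refill failed, rmap_can_add() simply makes the write path skip the pte prefetch instead of running the cache dry.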
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	25
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f1b36cf3e3d0..232c5a30ddc8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -593,6 +593,11 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 	return 0;
 }
 
+static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
+{
+	return cache->nobjs;
+}
+
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
 				  struct kmem_cache *cache)
 {
@@ -970,6 +975,14 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 	return &linfo->rmap_pde;
 }
 
+static bool rmap_can_add(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_memory_cache *cache;
+
+	cache = &vcpu->arch.mmu_pte_list_desc_cache;
+	return mmu_memory_cache_free_objects(cache);
+}
+
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
@@ -3586,6 +3599,12 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		break;
 	}
 
+	/*
+	 * No need to care whether the memory allocation is successful
+	 * or not, since pte prefetch is skipped if it does not have
+	 * enough objects in the cache.
+	 */
+	mmu_topup_memory_caches(vcpu);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
 		gentry = 0;
@@ -3656,7 +3675,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		mmu_page_zap_pte(vcpu->kvm, sp, spte);
 		if (gentry &&
 		      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
-		      & mask.word))
+		      & mask.word) && rmap_can_add(vcpu))
 			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 		if (!remote_flush && need_remote_flush(entry, *spte))
 			remote_flush = true;
@@ -3717,10 +3736,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		goto out;
 	}
 
-	r = mmu_topup_memory_caches(vcpu);
-	if (r)
-		goto out;
-
 	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 
 	switch (er) {