aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2010-08-28 07:22:46 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:51:47 -0400
commitbc32ce2152406431acf4daf4a81dc1664bb7b91b (patch)
treeae2f6737d56c8d977485489d58d08b3528fd2d6e
parent0beb8d660425aab339ff68e6f4d4528739e8fc4f (diff)
KVM: MMU: fix wrong not write protected sp report
The audit code reports some sp not write protected in the current code; this is just a bug in audit_write_protection(), since: - an invalid sp does not need to be write protected - it uses an uninitialized local variable ('gfn') - kvm_mmu_audit() is called outside of mmu_lock's protection Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--arch/x86/kvm/mmu.c5
-rw-r--r--arch/x86/kvm/paging_tmpl.h3
2 files changed, 5 insertions, 3 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1c784b96dac3..68575dc32ec7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3708,16 +3708,17 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
3708 struct kvm_memory_slot *slot; 3708 struct kvm_memory_slot *slot;
3709 unsigned long *rmapp; 3709 unsigned long *rmapp;
3710 u64 *spte; 3710 u64 *spte;
3711 gfn_t gfn;
3712 3711
3713 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) { 3712 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3714 if (sp->role.direct) 3713 if (sp->role.direct)
3715 continue; 3714 continue;
3716 if (sp->unsync) 3715 if (sp->unsync)
3717 continue; 3716 continue;
3717 if (sp->role.invalid)
3718 continue;
3718 3719
3719 slot = gfn_to_memslot(vcpu->kvm, sp->gfn); 3720 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
3720 rmapp = &slot->rmap[gfn - slot->base_gfn]; 3721 rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
3721 3722
3722 spte = rmap_next(vcpu->kvm, rmapp, NULL); 3723 spte = rmap_next(vcpu->kvm, rmapp, NULL);
3723 while (spte) { 3724 while (spte) {
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a4e8389df2ad..a0f2febf5692 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -504,7 +504,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
504 unsigned long mmu_seq; 504 unsigned long mmu_seq;
505 505
506 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); 506 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
507 kvm_mmu_audit(vcpu, "pre page fault");
508 507
509 r = mmu_topup_memory_caches(vcpu); 508 r = mmu_topup_memory_caches(vcpu);
510 if (r) 509 if (r)
@@ -542,6 +541,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
542 spin_lock(&vcpu->kvm->mmu_lock); 541 spin_lock(&vcpu->kvm->mmu_lock);
543 if (mmu_notifier_retry(vcpu, mmu_seq)) 542 if (mmu_notifier_retry(vcpu, mmu_seq))
544 goto out_unlock; 543 goto out_unlock;
544
545 kvm_mmu_audit(vcpu, "pre page fault");
545 kvm_mmu_free_some_pages(vcpu); 546 kvm_mmu_free_some_pages(vcpu);
546 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, 547 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
547 level, &write_pt, pfn); 548 level, &write_pt, pfn);