path: root/drivers/kvm/mmu.c
author:    Avi Kivity <avi@qumranet.com>  2007-09-23 08:10:49 -0400
committer: Avi Kivity <avi@qumranet.com>  2008-01-30 10:52:48 -0500
commit:    12b7d28fc102b772eb70f98491587ec5ee717baf
tree:      679077d072c9c0195a962a16f805bb228070a87c /drivers/kvm/mmu.c
parent:    c7addb902054195b995114df154e061c7d604f69
KVM: MMU: Make flooding detection work when guest page faults are bypassed
When we allow guest page faults to reach the guest directly, we lose the fault tracking which allows us to detect demand paging. So we provide an alternate mechanism: clear the accessed bit when we set a pte, and check it later to see whether the guest actually used it.

Signed-off-by: Avi Kivity <avi@qumranet.com>
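To illustrate the heuristic, here is a minimal stand-alone sketch of the accessed-bit trick: install shadow ptes with the accessed bit cleared, and only count a page-table write toward the flood counter when the previously installed pte was never used. The struct vcpu below and the helpers set_shadow_pte() and track_pt_write() are simplified stand-ins invented for illustration, not the real KVM structures or call paths.

/*
 * User-space sketch of the flooding heuristic in this commit.  The
 * names mirror the kernel code, but the types and helpers here are
 * simplified stand-ins, not KVM internals.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_ACCESSED_MASK (1ULL << 5)	/* accessed bit of an x86 pte */

struct vcpu {
	uint64_t last_pt_write_gfn;	/* guest page table written last */
	int last_pt_write_count;	/* consecutive writes to that gfn */
	uint64_t *last_pte_updated;	/* shadow pte set by the last write */
};

/* Install a shadow pte with the accessed bit cleared and remember it. */
static void set_shadow_pte(struct vcpu *vcpu, uint64_t *spte, uint64_t val)
{
	*spte = val & ~PT_ACCESSED_MASK;
	vcpu->last_pte_updated = spte;
}

/* True if the guest used the translation (hardware set the bit). */
static bool last_updated_pte_accessed(struct vcpu *vcpu)
{
	uint64_t *spte = vcpu->last_pte_updated;

	return spte && (*spte & PT_ACCESSED_MASK);
}

/*
 * Called for each emulated write to a guest page table, before the new
 * pte is installed.  Repeated writes to the same table whose last pte
 * was never used look like demand paging, i.e. flooding.
 */
static bool track_pt_write(struct vcpu *vcpu, uint64_t gfn)
{
	if (gfn == vcpu->last_pt_write_gfn &&
	    !last_updated_pte_accessed(vcpu))
		return ++vcpu->last_pt_write_count >= 3;
	vcpu->last_pt_write_gfn = gfn;
	vcpu->last_pt_write_count = 1;
	vcpu->last_pte_updated = NULL;
	return false;
}

int main(void)
{
	struct vcpu vcpu = { 0 };
	uint64_t sptes[3] = { 0 };
	int i;

	/* The guest rewrites one page table without using any pte:
	 * the third write trips the detector. */
	for (i = 0; i < 3; i++) {
		printf("write %d: flooded=%d\n", i + 1,
		       track_pt_write(&vcpu, 42));
		set_shadow_pte(&vcpu, &sptes[i], 0x1000 * (i + 1));
	}

	/* The guest now uses the last translation, so the next write
	 * is treated as demand paging and resets the counter. */
	*vcpu.last_pte_updated |= PT_ACCESSED_MASK;
	printf("after access: flooded=%d\n", track_pt_write(&vcpu, 42));
	return 0;
}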
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--  drivers/kvm/mmu.c | 21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 069ce83f018e..d347e895736e 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -692,6 +692,15 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *page,
 	mmu_page_remove_parent_pte(page, parent_pte);
 }
 
+static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
+{
+	int i;
+
+	for (i = 0; i < KVM_MAX_VCPUS; ++i)
+		if (kvm->vcpus[i])
+			kvm->vcpus[i]->last_pte_updated = NULL;
+}
+
 static void kvm_mmu_zap_page(struct kvm *kvm,
 			     struct kvm_mmu_page *page)
 {
@@ -717,6 +726,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
 		kvm_mmu_free_page(kvm, page);
 	} else
 		list_move(&page->link, &kvm->active_mmu_pages);
+	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -1140,6 +1150,13 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				  offset_in_pte);
 }
 
+static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
+{
+	u64 *spte = vcpu->last_pte_updated;
+
+	return !!(spte && (*spte & PT_ACCESSED_MASK));
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
@@ -1160,13 +1177,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	kvm_mmu_audit(vcpu, "pre pte write");
-	if (gfn == vcpu->last_pt_write_gfn) {
+	if (gfn == vcpu->last_pt_write_gfn
+	    && !last_updated_pte_accessed(vcpu)) {
 		++vcpu->last_pt_write_count;
 		if (vcpu->last_pt_write_count >= 3)
 			flooded = 1;
 	} else {
 		vcpu->last_pt_write_gfn = gfn;
 		vcpu->last_pt_write_count = 1;
+		vcpu->last_pte_updated = NULL;
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];