author    Avi Kivity <avi@qumranet.com>  2007-09-23 08:10:49 -0400
committer Avi Kivity <avi@qumranet.com>  2008-01-30 10:52:48 -0500
commit    12b7d28fc102b772eb70f98491587ec5ee717baf (patch)
tree      679077d072c9c0195a962a16f805bb228070a87c /drivers
parent    c7addb902054195b995114df154e061c7d604f69 (diff)
KVM: MMU: Make flooding detection work when guest page faults are bypassed
When we allow guest page faults to reach the guest directly, we lose the fault tracking which allows us to detect demand paging. So we provide an alternate mechanism: clear the accessed bit when we set a pte, and check it later to see whether the guest actually used it.

Signed-off-by: Avi Kivity <avi@qumranet.com>
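In other words, the shadow pte setup path deliberately leaves the accessed bit clear and remembers which spte it last touched; the write-flooding path later tests that bit to tell demand paging apart from repeated page-table overwrites. A minimal sketch of the idea, assuming the last_pte_updated field and PT_ACCESSED_MASK from the patch below (install_spte and guest_used_pte are hypothetical helpers for illustration, not names from the patch):

/*
 * Sketch only: install_spte stands in for the shadow pte setup path.
 * We leave the accessed bit clear and remember which spte we set.
 */
static void install_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, u64 spte)
{
	*shadow_pte = spte & ~PT_ACCESSED_MASK;
	vcpu->last_pte_updated = shadow_pte;
}

/*
 * On a later write to the same guest page table, check whether the
 * guest actually used the translation: if the CPU set the accessed
 * bit in the meantime, this looks like demand paging, so the write
 * should not count toward the flooding heuristic.
 */
static bool guest_used_pte(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->last_pte_updated;

	return spte && (*spte & PT_ACCESSED_MASK);
}

guest_used_pte mirrors last_updated_pte_accessed() in the patch; note that the patch also resets last_pte_updated on every vcpu whenever a shadow page is zapped, so the cached pointer can never dangle.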
Diffstat (limited to 'drivers')

 drivers/kvm/kvm.h         |  1 +
 drivers/kvm/mmu.c         | 21 ++++++++++++++++++++-
 drivers/kvm/paging_tmpl.h |  9 ++++++++-
 3 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 7de948e9e64e..08ffc829f07f 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -346,6 +346,7 @@ struct kvm_vcpu {
 
 	gfn_t last_pt_write_gfn;
 	int last_pt_write_count;
+	u64 *last_pte_updated;
 
 	struct kvm_guest_debug guest_debug;
 
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 069ce83f018e..d347e895736e 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -692,6 +692,15 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *page,
 	mmu_page_remove_parent_pte(page, parent_pte);
 }
 
+static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
+{
+	int i;
+
+	for (i = 0; i < KVM_MAX_VCPUS; ++i)
+		if (kvm->vcpus[i])
+			kvm->vcpus[i]->last_pte_updated = NULL;
+}
+
 static void kvm_mmu_zap_page(struct kvm *kvm,
 			     struct kvm_mmu_page *page)
 {
@@ -717,6 +726,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
 		kvm_mmu_free_page(kvm, page);
 	} else
 		list_move(&page->link, &kvm->active_mmu_pages);
+	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -1140,6 +1150,13 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				  offset_in_pte);
 }
 
+static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
+{
+	u64 *spte = vcpu->last_pte_updated;
+
+	return !!(spte && (*spte & PT_ACCESSED_MASK));
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
@@ -1160,13 +1177,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	kvm_mmu_audit(vcpu, "pre pte write");
-	if (gfn == vcpu->last_pt_write_gfn) {
+	if (gfn == vcpu->last_pt_write_gfn
+	    && !last_updated_pte_accessed(vcpu)) {
 		++vcpu->last_pt_write_count;
 		if (vcpu->last_pt_write_count >= 3)
 			flooded = 1;
 	} else {
 		vcpu->last_pt_write_gfn = gfn;
 		vcpu->last_pt_write_count = 1;
+		vcpu->last_pte_updated = NULL;
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 99ac9b15f773..be0f85231da9 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -238,7 +238,12 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
 	}
 
-	spte = PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
+	/*
+	 * We don't set the accessed bit, since we sometimes want to see
+	 * whether the guest actually used the pte (in order to detect
+	 * demand paging).
+	 */
+	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
 	spte |= gpte & PT64_NX_MASK;
 	if (!dirty)
 		access_bits &= ~PT_WRITABLE_MASK;
@@ -291,6 +296,8 @@ unshadowed:
 	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
 	if (!was_rmapped)
 		rmap_add(vcpu, shadow_pte);
+	if (!ptwrite || !*ptwrite)
+		vcpu->last_pte_updated = shadow_pte;
 }
 
 static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,