author		Avi Kivity <avi@qumranet.com>	2007-01-05 19:36:50 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2007-01-06 02:55:26 -0500
commit		86a5ba025d0a0b251817d0efbeaf7037d4175d21 (patch)
tree		35dbc71edaa0d242ba4c0ca429c41cff67df38d0
parent		139bdb2d9e410d448281057a37b53770324ccac8 (diff)
[PATCH] KVM: MMU: Page table write flood protection
In fork() (or when we protect a page that is no longer a page table), we can
experience floods of writes to a page, which have to be emulated.  This is
expensive.  So, if we detect such a flood, zap the page so subsequent writes
can proceed natively.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
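The heuristic is small enough to show standalone. Below is a minimal sketch of the flood detector, assuming a pared-down per-vcpu state that carries only the two fields this patch introduces; the struct and helper names are illustrative stand-ins, not part of the patch, while the threshold of 3 and the field names come from the diff below.

typedef unsigned long gfn_t;	/* guest frame number, as in the KVM sources */

struct vcpu_flood_state {	/* hypothetical stand-in for struct kvm_vcpu */
	gfn_t last_pt_write_gfn;	/* last gfn written through emulation */
	int last_pt_write_count;	/* consecutive emulated writes to it */
};

/*
 * Returns 1 once the same gfn has taken three or more consecutive
 * emulated writes; the caller then zaps the shadow page so later
 * writes proceed natively instead of being emulated one by one.
 */
static int detect_write_flood(struct vcpu_flood_state *v, gfn_t gfn)
{
	if (gfn == v->last_pt_write_gfn) {
		++v->last_pt_write_count;
		return v->last_pt_write_count >= 3;
	}
	v->last_pt_write_gfn = gfn;
	v->last_pt_write_count = 1;
	return 0;
}

Note that the counter resets whenever the written gfn changes, so only an uninterrupted run of writes to a single page trips the zap.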
-rw-r--r--	drivers/kvm/kvm.h	3
-rw-r--r--	drivers/kvm/mmu.c	16
2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 6e4daf404146..201b2735ca91 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -238,6 +238,9 @@ struct kvm_vcpu {
 	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
 	struct kvm_mmu mmu;
 
+	gfn_t last_pt_write_gfn;
+	int last_pt_write_count;
+
 	struct kvm_guest_debug guest_debug;
 
 	char fx_buf[FX_BUF_SIZE];
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 8cf3688f7e70..0e44aca9eee7 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -969,8 +969,17 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 	unsigned page_offset;
 	unsigned misaligned;
 	int level;
+	int flooded = 0;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+	if (gfn == vcpu->last_pt_write_gfn) {
+		++vcpu->last_pt_write_count;
+		if (vcpu->last_pt_write_count >= 3)
+			flooded = 1;
+	} else {
+		vcpu->last_pt_write_gfn = gfn;
+		vcpu->last_pt_write_count = 1;
+	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
 	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
@@ -978,11 +987,16 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 			continue;
 		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
-		if (misaligned) {
+		if (misaligned || flooded) {
 			/*
 			 * Misaligned accesses are too much trouble to fix
 			 * up; also, they usually indicate a page is not used
 			 * as a page table.
+			 *
+			 * If we're seeing too many writes to a page,
+			 * it may no longer be a page table, or we may be
+			 * forking, in which case it is better to unmap the
+			 * page.
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, page->role.word);
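As an aside, the pre-existing misaligned test that this patch extends with "|| flooded" is a one-line bit trick: XOR the offsets of the first and last byte of the write, then mask off the bits that address within a single PTE; any surviving high bits mean the write straddles a PTE boundary. A small self-contained demonstration follows, with a hypothetical helper name (a nonzero return means misaligned); it is a sketch for illustration, not code from the patch.

#include <stdio.h>

/*
 * Nonzero when a write of 'bytes' starting at 'offset' does not fall
 * entirely inside one pte_size-aligned entry; pte_size is 4 for 32-bit
 * guest page tables and 8 for PAE/64-bit ones, as in the diff above.
 */
static unsigned misaligned_write(unsigned offset, unsigned bytes,
				 unsigned pte_size)
{
	return (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
}

int main(void)
{
	/* An 8-byte write at offset 8 stays within one 64-bit PTE ... */
	printf("%u\n", misaligned_write(8, 8, 8));	/* prints 0 */
	/* ... while the same write at offset 12 straddles two PTEs. */
	printf("%u\n", misaligned_write(12, 8, 8));	/* prints 24 */
	return 0;
}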