aboutsummaryrefslogtreecommitdiffstats
path: root/virt
diff options
context:
space:
mode:
authorTakuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>2010-04-23 04:48:35 -0400
committerAvi Kivity <avi@redhat.com>2010-05-17 05:19:13 -0400
commitd14769377a247d4e7b570592a090474c8a059938 (patch)
treefd7e0de9712a29f99962b789ab3ab70a1338087f /virt
parent039091875ce4629d83db64c055528e7b86337d50 (diff)
KVM: Remove test-before-set optimization for dirty bits
As Avi pointed out, testing the bit in mark_page_dirty() was important in the days of shadow paging, but currently EPT and NPT have already become common and the chance of faulting a page more than once per iteration is small. So let's remove the test of the bit to avoid the extra access. Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/kvm_main.c4
1 file changed, 1 insertion, 3 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6dc940455e8b..9ab1a77941ef 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1192,9 +1192,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
 	}
 }
 