author	Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>	2013-01-08 05:44:48 -0500
committer	Gleb Natapov <gleb@redhat.com>	2013-01-14 04:13:47 -0500
commit	b99db1d35295cb26b61a1c665f542504110b0ac3 (patch)
tree	57c9fc5a8189e1ec73259701b6ff6296e1983c18 /arch
parent	245c3912eae642a4b7a3ce0adfcde5cc7672d5fe (diff)
KVM: MMU: Make kvm_mmu_slot_remove_write_access() rmap based
This makes it possible to release mmu_lock and reschedule conditionally in a later patch. Although this may increase the time needed to protect the whole slot when we start dirty logging, the kernel should not allow userspace to trigger something that holds a spinlock for as long as tens of milliseconds: actually there is no limit, since the time is roughly proportional to the number of guest pages.

Another point to note is that this patch removes the only user of slot_bitmap, which would otherwise become a problem when we increase the number of slots further.

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
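As a rough sketch of the mmu_lock point above (not part of this patch): once the walk is rmap based and per-memslot, a follow-up could yield the spinlock between rmap entries. The cond_resched_lock() placement below is a hypothetical illustration only, reusing the loop variables introduced by the new code in this diff:

	for (index = 0; index <= last_index; ++index, ++rmapp) {
		if (*rmapp)
			__rmap_write_protect(kvm, rmapp, false);

		/*
		 * Hypothetical follow-up, not in this patch: the rmap walk
		 * keeps no per-shadow-page state, so mmu_lock could be
		 * dropped and reacquired here when a reschedule is pending.
		 */
		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
			cond_resched_lock(&kvm->mmu_lock);
	}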
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	28
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a1cde5a3938..aeb7666eb81e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4172,25 +4172,27 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
-	struct kvm_mmu_page *sp;
-	bool flush = false;
+	struct kvm_memory_slot *memslot;
+	gfn_t last_gfn;
+	int i;
 
-	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
-		int i;
-		u64 *pt;
+	memslot = id_to_memslot(kvm->memslots, slot);
+	last_gfn = memslot->base_gfn + memslot->npages - 1;
 
-		if (!test_bit(slot, sp->slot_bitmap))
-			continue;
+	for (i = PT_PAGE_TABLE_LEVEL;
+	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+		unsigned long *rmapp;
+		unsigned long last_index, index;
 
-		pt = sp->spt;
-		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-			if (!is_shadow_present_pte(pt[i]) ||
-			    !is_last_spte(pt[i], sp->role.level))
-				continue;
+		rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+		last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
 
-			spte_write_protect(kvm, &pt[i], &flush, false);
+		for (index = 0; index <= last_index; ++index, ++rmapp) {
+			if (*rmapp)
+				__rmap_write_protect(kvm, rmapp, false);
 		}
 	}
+
 	kvm_flush_remote_tlbs(kvm);
 }
 