| author | Marcelo Tosatti <mtosatti@redhat.com> | 2009-05-12 17:55:43 -0400 |
|---|---|---|
| committer | Avi Kivity <avi@redhat.com> | 2009-06-10 04:48:54 -0400 |
| commit | 7c8a83b75a38a807d37f5a4398eca2a42c8cf513 (patch) | |
| tree | 6d06eb065c6658c5058354cf7289428b495c909f /arch/x86/kvm/x86.c | |
| parent | 310b5d306c1aee7ebe32f702c0e33e7988d50646 (diff) | |
KVM: MMU: protect kvm_mmu_change_mmu_pages with mmu_lock
kvm_handle_hva, called by the MMU notifiers, manipulates MMU data only
under the protection of mmu_lock.

Update the callers of kvm_mmu_change_mmu_pages to take mmu_lock as well,
thus protecting them against a concurrent kvm_handle_hva.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
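For context, every call site touched by this patch follows the same pattern. Below is a minimal sketch of that pattern, assuming the 2.6.30-era KVM structures; `resize_mmu_pages` and `pages` are illustrative names, not symbols in the tree:

```c
#include <linux/kvm_host.h>	/* struct kvm, mmu_lock, kvm_mmu_change_mmu_pages() */

/* Hypothetical helper showing the rule this patch enforces: resizing the
 * shadow-page pool must happen under mmu_lock, because kvm_handle_hva
 * walks the same rmap/shadow-page data while holding that lock. */
static void resize_mmu_pages(struct kvm *kvm, unsigned int pages)
{
	spin_lock(&kvm->mmu_lock);
	kvm_mmu_change_mmu_pages(kvm, pages);
	spin_unlock(&kvm->mmu_lock);
}
```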
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- arch/x86/kvm/x86.c | 6 ++++++
1 file changed, 6 insertions(+), 0 deletions(-)
```diff
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 48f744ff0bc1..d2a4eca26181 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1625,10 +1625,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 		return -EINVAL;
 
 	down_write(&kvm->slots_lock);
+	spin_lock(&kvm->mmu_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+	spin_unlock(&kvm->mmu_lock);
 	up_write(&kvm->slots_lock);
 	return 0;
 }
@@ -1804,7 +1806,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
+		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots[log->slot];
 		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4548,12 +4552,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		}
 	}
 
+	spin_lock(&kvm->mmu_lock);
 	if (!kvm->arch.n_requested_mmu_pages) {
 		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	}
 
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 
 	return 0;
```
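For reference, the other side of the race is the MMU-notifier path, which already held mmu_lock before descending into kvm_handle_hva. A simplified sketch of the 2.6.30-era invalidate-page callback from virt/kvm/kvm_main.c (sequence-count bookkeeping omitted):

```c
static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	spin_lock(&kvm->mmu_lock);	/* same lock the patched callers now take */
	need_tlb_flush = kvm_unmap_hva(kvm, address);	/* reaches kvm_handle_hva */
	spin_unlock(&kvm->mmu_lock);

	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}
```

Without this patch, kvm_vm_ioctl_set_nr_mmu_pages and kvm_arch_set_memory_region could shrink or rewrite the shadow-page lists while this callback was walking them.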