author	Marcelo Tosatti <mtosatti@redhat.com>	2009-05-12 17:55:43 -0400
committer	Avi Kivity <avi@redhat.com>	2009-06-10 04:48:54 -0400
commit	7c8a83b75a38a807d37f5a4398eca2a42c8cf513 (patch)
tree	6d06eb065c6658c5058354cf7289428b495c909f /arch/x86/kvm
parent	310b5d306c1aee7ebe32f702c0e33e7988d50646 (diff)
KVM: MMU: protect kvm_mmu_change_mmu_pages with mmu_lock
kvm_handle_hva, called by MMU notifiers, manipulates mmu data only with
the protection of mmu_lock.

Update kvm_mmu_change_mmu_pages callers to take mmu_lock, thus
protecting against kvm_handle_hva.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	2
-rw-r--r--	arch/x86/kvm/x86.c	6
2 files changed, 6 insertions, 2 deletions
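To make the locking pattern concrete: the sketch below is an illustrative, simplified fragment in kernel style, not a verbatim excerpt from the tree (the validation at the top of the function is elided). It shows the caller-side bracketing that this patch introduces, so that kvm_mmu_change_mmu_pages() runs under the same kvm->mmu_lock that the MMU-notifier path (kvm_handle_hva) already holds.

#include <linux/kvm_host.h>

/*
 * Sketch of the pattern adopted by this patch (abbreviated, illustrative
 * only): mmu_lock is taken by the callers, so every path that mutates MMU
 * state -- kvm_mmu_change_mmu_pages() as well as
 * kvm_mmu_slot_remove_write_access() -- is serialized against
 * kvm_handle_hva(), which already runs with mmu_lock held.
 */
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					  u32 kvm_nr_mmu_pages)
{
	/* ... validation of kvm_nr_mmu_pages elided ... */

	down_write(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	spin_unlock(&kvm->mmu_lock);
	up_write(&kvm->slots_lock);
	return 0;
}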
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bc614f91f5ba..3ce60ad1fe37 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2723,7 +2723,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
 	struct kvm_mmu_page *sp;
 
-	spin_lock(&kvm->mmu_lock);
 	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
 		int i;
 		u64 *pt;
@@ -2738,7 +2737,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 				pt[i] &= ~PT_WRITABLE_MASK;
 	}
 	kvm_flush_remote_tlbs(kvm);
-	spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 48f744ff0bc1..d2a4eca26181 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1625,10 +1625,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 		return -EINVAL;
 
 	down_write(&kvm->slots_lock);
+	spin_lock(&kvm->mmu_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+	spin_unlock(&kvm->mmu_lock);
 	up_write(&kvm->slots_lock);
 	return 0;
 }
@@ -1804,7 +1806,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
+		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots[log->slot];
 		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4548,12 +4552,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		}
 	}
 
+	spin_lock(&kvm->mmu_lock);
 	if (!kvm->arch.n_requested_mmu_pages) {
 		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	}
 
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 
 	return 0;