author     Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>    2013-01-08 05:46:07 -0500
committer  Gleb Natapov <gleb@redhat.com>                          2013-01-14 04:14:09 -0500
commit     b34cb590fb099f7929dd78d9464b70319ee12a98
tree       2af7587ccd189b568129b9e2eccebdcb5cd690d3
parent     e12091ce7bdd3c82fa392a868d1bdccecee655d5
KVM: Make kvm_mmu_change_mmu_pages() take mmu_lock by itself
No reason to make callers take mmu_lock: kvm_mmu_change_mmu_pages() and
kvm_mmu_slot_remove_write_access() do not need to be protected together
under mmu_lock in kvm_arch_commit_memory_region(), because the former
calls kvm_mmu_commit_zap_page() and flushes TLBs by itself.

Note: kvm->arch.n_requested_mmu_pages does not need to be protected by
mmu_lock either, as can be seen from the fact that it is read locklessly.
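
In short, the locking moves from the callers into the callee. A condensed
before/after sketch of the calling convention (simplified from the hunks
below, not complete kernel code):

    /* Before this patch: every caller had to bracket the call itself. */
    spin_lock(&kvm->mmu_lock);
    kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
    spin_unlock(&kvm->mmu_lock);

    /*
     * After this patch: the function acquires and releases mmu_lock
     * internally, so callers such as kvm_vm_ioctl_set_nr_mmu_pages()
     * and kvm_arch_commit_memory_region() simply call it.
     */
    kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);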
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
-rw-r--r--  arch/x86/kvm/mmu.c | 4 ++++
-rw-r--r--  arch/x86/kvm/x86.c | 9 ++++-----
2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9c1b2d6158bf..f5572804f594 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2143,6 +2143,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	 * change the value
 	 */
 
+	spin_lock(&kvm->mmu_lock);
+
 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
 		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
 			!list_empty(&kvm->arch.active_mmu_pages)) {
@@ -2157,6 +2159,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	}
 
 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
+
+	spin_unlock(&kvm->mmu_lock);
 }
 
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index add5e4801968..080bbdcbf2ee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3270,12 +3270,10 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 		return -EINVAL;
 
 	mutex_lock(&kvm->slots_lock);
-	spin_lock(&kvm->mmu_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-	spin_unlock(&kvm->mmu_lock);
 	mutex_unlock(&kvm->slots_lock);
 	return 0;
 }
@@ -6894,7 +6892,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	if (!kvm->arch.n_requested_mmu_pages)
 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
-	spin_lock(&kvm->mmu_lock);
 	if (nr_mmu_pages)
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	/*
@@ -6902,9 +6899,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	 * Existing largepage mappings are destroyed here and new ones will
 	 * not be created until the end of the logging.
 	 */
-	if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+	if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
-	spin_unlock(&kvm->mmu_lock);
+		spin_unlock(&kvm->mmu_lock);
+	}
 	/*
 	 * If memory slot is created, or moved, we need to clear all
 	 * mmio sptes.