author     Suzuki K Poulose <suzuki.poulose@arm.com>        2017-04-03 10:12:43 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-04-12 06:41:11 -0400
commit     ac303c64cdb82a66817df89a7b56ebececd7396f (patch)
tree       5842dade07d2b586f7ae16df7540e8f40a7f291f
parent     a1ea3189368498e8921cb8173144fee2b191d019 (diff)
kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd
commit 8b3405e345b5a098101b0c31b264c812bba045d9 upstream.
In kvm_free_stage2_pgd() we don't hold the kvm->mmu_lock while calling
unmap_stage2_range() on the entire memory range for the guest. This could
race with other callers (e.g., munmap on a memslot) trying to unmap a
range. And since we now have to unmap the entire guest memory range while
holding a spinlock, make sure we yield the lock, if necessary, after we
unmap each PUD range.
Fixes: d5d8184d35c9 ("KVM: ARM: Memory virtualization setup")
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
[ Avoid vCPU starvation and lockup detector warnings ]
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
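
To make the resulting locking contract explicit, here is a condensed sketch
of what the patch arranges. The hypothetical helper walk_one_pud_range() and
the function names stand in for the real page-table walk; the lock, the
assertion, and cond_resched_lock() are the pieces taken from the patch:

/* Hedged sketch, not the patch itself; assumes the usual KVM/ARM
 * context (struct kvm, phys_addr_t, KVM_PHYS_SIZE from kvm headers). */
static void sketch_unmap_range(struct kvm *kvm, phys_addr_t addr,
			       phys_addr_t end)
{
	/* Callee side: demand that every caller already holds mmu_lock. */
	assert_spin_locked(&kvm->mmu_lock);

	while (addr != end) {
		/* walk_one_pud_range() is a hypothetical stand-in that
		 * unmaps one PUD range and returns the next address. */
		phys_addr_t next = walk_one_pud_range(kvm, addr, end);

		/*
		 * Between PUD ranges, briefly drop and retake the lock
		 * if someone needs it, so a long unmap cannot starve
		 * vCPUs or trip the lockup detector.
		 */
		if (next != end)
			cond_resched_lock(&kvm->mmu_lock);
		addr = next;
	}
}

static void sketch_free_pgd(struct kvm *kvm)
{
	/* Caller side: take mmu_lock around the full-range unmap. */
	spin_lock(&kvm->mmu_lock);
	sketch_unmap_range(kvm, 0, KVM_PHYS_SIZE);
	spin_unlock(&kvm->mmu_lock);
}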
-rw-r--r--  arch/arm/kvm/mmu.c | 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 17c946369c88..2fd5c135e8a4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
+	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
+		/*
+		 * If the range is too large, release the kvm->mmu_lock
+		 * to prevent starvation and lockup detector warnings.
+		 */
+		if (next != end)
+			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
@@ -831,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	if (kvm->arch.pgd == NULL)
 		return;
 
+	spin_lock(&kvm->mmu_lock);
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Free the HW pgd, one page at a time */
 	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
 	kvm->arch.pgd = NULL;
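
For reference, cond_resched_lock() behaves roughly like the simplified model
below. This is a sketch of its documented behaviour, not the scheduler's
actual implementation, which also does preempt-count bookkeeping and may
spin with cpu_relax() instead of rescheduling:

#include <linux/sched.h>
#include <linux/spinlock.h>

/* Simplified model (assumption), named _model to avoid any claim that
 * this is the real kernel/sched implementation. */
static inline int cond_resched_lock_model(spinlock_t *lock)
{
	/* Only cycle the lock when the scheduler wants this CPU back
	 * or another CPU is spinning on the lock. */
	if (need_resched() || spin_needbreak(lock)) {
		spin_unlock(lock);	/* let waiters in */
		cond_resched();		/* yield the CPU if required */
		spin_lock(lock);	/* reacquire before returning */
		return 1;
	}
	return 0;
}

The net effect in this patch: even a full-range unmap of KVM_PHYS_SIZE never
holds kvm->mmu_lock across more than one PUD range when the lock is
contended, which is what avoids the vCPU starvation and lockup detector
warnings mentioned in the bracketed note above.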