diff options
author | Marc Zyngier <marc.zyngier@arm.com> | 2017-03-16 14:20:50 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-04-12 06:41:11 -0400 |
commit | a1ea3189368498e8921cb8173144fee2b191d019 (patch) | |
tree | 1763ad22b41ce8845ed12d9e4c26715ce50a4dc2 /arch/arm | |
parent | 48f2825abc65943437033adfd05b59e287ea3bfd (diff) |
arm/arm64: KVM: Take mmap_sem in kvm_arch_prepare_memory_region
commit 72f310481a08db821b614e7b5d00febcc9064b36 upstream.
We don't hold the mmap_sem while searching for VMAs (via find_vma), in
kvm_arch_prepare_memory_region, which can end up in expected failures.
Fixes: commit 8eef91239e57 ("arm/arm64: KVM: map MMIO regions at creation time")
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Eric Auger <eric.auger@rehat.com>
Reviewed-by: Christoffer Dall <cdall@linaro.org>
[ Handle dirty page logging failure case ]
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/kvm/mmu.c | 11 |
1 file changed, 8 insertions, 3 deletions
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 089f9df16d49..17c946369c88 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -1806,6 +1806,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, | |||
1806 | (KVM_PHYS_SIZE >> PAGE_SHIFT)) | 1806 | (KVM_PHYS_SIZE >> PAGE_SHIFT)) |
1807 | return -EFAULT; | 1807 | return -EFAULT; |
1808 | 1808 | ||
1809 | down_read(&current->mm->mmap_sem); | ||
1809 | /* | 1810 | /* |
1810 | * A memory region could potentially cover multiple VMAs, and any holes | 1811 | * A memory region could potentially cover multiple VMAs, and any holes |
1811 | * between them, so iterate over all of them to find out if we can map | 1812 | * between them, so iterate over all of them to find out if we can map |
@@ -1849,8 +1850,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, | |||
1849 | pa += vm_start - vma->vm_start; | 1850 | pa += vm_start - vma->vm_start; |
1850 | 1851 | ||
1851 | /* IO region dirty page logging not allowed */ | 1852 | /* IO region dirty page logging not allowed */ |
1852 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) | 1853 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
1853 | return -EINVAL; | 1854 | ret = -EINVAL; |
1855 | goto out; | ||
1856 | } | ||
1854 | 1857 | ||
1855 | ret = kvm_phys_addr_ioremap(kvm, gpa, pa, | 1858 | ret = kvm_phys_addr_ioremap(kvm, gpa, pa, |
1856 | vm_end - vm_start, | 1859 | vm_end - vm_start, |
@@ -1862,7 +1865,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, | |||
1862 | } while (hva < reg_end); | 1865 | } while (hva < reg_end); |
1863 | 1866 | ||
1864 | if (change == KVM_MR_FLAGS_ONLY) | 1867 | if (change == KVM_MR_FLAGS_ONLY) |
1865 | return ret; | 1868 | goto out; |
1866 | 1869 | ||
1867 | spin_lock(&kvm->mmu_lock); | 1870 | spin_lock(&kvm->mmu_lock); |
1868 | if (ret) | 1871 | if (ret) |
@@ -1870,6 +1873,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, | |||
1870 | else | 1873 | else |
1871 | stage2_flush_memslot(kvm, memslot); | 1874 | stage2_flush_memslot(kvm, memslot); |
1872 | spin_unlock(&kvm->mmu_lock); | 1875 | spin_unlock(&kvm->mmu_lock); |
1876 | out: | ||
1877 | up_read(&current->mm->mmap_sem); | ||
1873 | return ret; | 1878 | return ret; |
1874 | } | 1879 | } |
1875 | 1880 | ||