author		Todd Kjos <tkjos@android.com>	2019-03-01 18:06:06 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-03-21 01:51:32 -0400
commit		5cec2d2e5839f9c0fec319c523a911e0a7fd299f (patch)
tree		84da718fce8d1beaecc8ac62c419f95da174db54 /drivers/android/binder_alloc.c
parent		5997da82145bb7c9a56d834894cb81f81f219344 (diff)
binder: fix race between munmap() and direct reclaim
An munmap() on a binder device causes binder_vma_close() to be called
which clears the alloc->vma pointer.

If direct reclaim causes binder_alloc_free_page() to be called, there
is a race where alloc->vma is read into a local vma pointer and then
used later after the mm->mmap_sem is acquired. This can result in
calling zap_page_range() with an invalid vma which manifests as a
use-after-free in zap_page_range().

The fix is to check alloc->vma after acquiring the mmap_sem (which we
were acquiring anyway) and skip zap_page_range() if it has changed
to NULL.

Signed-off-by: Todd Kjos <tkjos@google.com>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--	drivers/android/binder_alloc.c	18
1 file changed, 8 insertions, 10 deletions
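
For orientation, here is a condensed before/after sketch of the locking
order in binder_alloc_free_page(), reconstructed from the diff below.
This is not part of the commit itself; error paths and unrelated
statements are elided.

	/* Before (racy): alloc->vma is read before mmap_sem is held, so a
	 * concurrent munmap() -> binder_vma_close() can clear the vma
	 * underneath us and zap_page_range() may run on a stale pointer.
	 */
	vma = binder_alloc_get_vma(alloc);
	if (vma) {
		if (!mmget_not_zero(alloc->vma_vm_mm))
			goto err_mmget;
		mm = alloc->vma_vm_mm;
		if (!down_read_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}
	...
	zap_page_range(vma, page_addr, PAGE_SIZE);	/* vma may be stale */

	/* After (fixed): take mmap_sem first, then read alloc->vma under
	 * the lock; if munmap() has already cleared it, vma is NULL and
	 * the zap is simply skipped.
	 */
	mm = alloc->vma_vm_mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!down_write_trylock(&mm->mmap_sem))
		goto err_down_write_mmap_sem_failed;
	vma = binder_alloc_get_vma(alloc);
	...
	if (vma)
		zap_page_range(vma, page_addr, PAGE_SIZE);
	up_write(&mm->mmap_sem);
	mmput(mm);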
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6389467670a0..195f120c4e8c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+	mm = alloc->vma_vm_mm;
+	if (!mmget_not_zero(mm))
+		goto err_mmget;
+	if (!down_write_trylock(&mm->mmap_sem))
+		goto err_down_write_mmap_sem_failed;
 	vma = binder_alloc_get_vma(alloc);
-	if (vma) {
-		if (!mmget_not_zero(alloc->vma_vm_mm))
-			goto err_mmget;
-		mm = alloc->vma_vm_mm;
-		if (!down_read_trylock(&mm->mmap_sem))
-			goto err_down_write_mmap_sem_failed;
-	}
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		zap_page_range(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
-
-		up_read(&mm->mmap_sem);
-		mmput(mm);
 	}
+	up_write(&mm->mmap_sem);
+	mmput(mm);
 
 	trace_binder_unmap_kernel_start(alloc, index);
 