about summary refs log tree commit diff stats
path: root/drivers/android/binder_alloc.c
diff options
context:
space:
mode:
authorSherry Yang <sherryy@android.com>2017-10-20 20:58:58 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-10-21 04:14:20 -0400
commita0c2baaf81bd53dc76fccdddc721ba7dbb62be21 (patch)
tree0104ebfb31019168be252bc006e4689674cd8dcf /drivers/android/binder_alloc.c
parent9d35593b4f0b89ab0c194349c7d357b3b159e99a (diff)
android: binder: Don't get mm from task
Use the binder_alloc struct's mm_struct rather than getting a reference to the mm struct through get_task_mm to avoid a potential deadlock between the lru lock, task lock and dentry lock, since a thread can be holding the task lock and the dentry lock while trying to acquire the lru lock.

Acked-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--drivers/android/binder_alloc.c22
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 064f5e31ec55..e12072b1d507 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 	}
 
-	if (!vma && need_mm)
-		mm = get_task_mm(alloc->tsk);
+	if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
+		mm = alloc->vma_vm_mm;
 
 	if (mm) {
 		down_write(&mm->mmap_sem);
 		vma = alloc->vma;
-		if (vma && mm != alloc->vma_vm_mm) {
-			pr_err("%d: vma mm and task mm mismatch\n",
-			       alloc->pid);
-			vma = NULL;
-		}
 	}
 
 	if (!vma && need_mm) {
@@ -720,6 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	barrier();
 	alloc->vma = vma;
 	alloc->vma_vm_mm = vma->vm_mm;
+	mmgrab(alloc->vma_vm_mm);
 
 	return 0;
 
@@ -795,6 +791,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		vfree(alloc->buffer);
 	}
 	mutex_unlock(&alloc->mutex);
+	if (alloc->vma_vm_mm)
+		mmdrop(alloc->vma_vm_mm);
 
 	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "%s: %d buffers %d, pages %d\n",
@@ -889,7 +887,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
 	WRITE_ONCE(alloc->vma, NULL);
-	WRITE_ONCE(alloc->vma_vm_mm, NULL);
 }
 
 /**
@@ -926,9 +923,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
 	vma = alloc->vma;
 	if (vma) {
-		mm = get_task_mm(alloc->tsk);
-		if (!mm)
-			goto err_get_task_mm_failed;
+		if (!mmget_not_zero(alloc->vma_vm_mm))
+			goto err_mmget;
+		mm = alloc->vma_vm_mm;
 		if (!down_write_trylock(&mm->mmap_sem))
 			goto err_down_write_mmap_sem_failed;
 	}
@@ -963,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 err_down_write_mmap_sem_failed:
 	mmput_async(mm);
-err_get_task_mm_failed:
+err_mmget:
 err_page_already_freed:
 	mutex_unlock(&alloc->mutex);
 err_get_alloc_mutex_failed:
@@ -1002,7 +999,6 @@ struct shrinker binder_shrinker = {
  */
 void binder_alloc_init(struct binder_alloc *alloc)
 {
-	alloc->tsk = current->group_leader;
 	alloc->pid = current->group_leader->pid;
 	mutex_init(&alloc->mutex);
 	INIT_LIST_HEAD(&alloc->buffers);