author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2017-10-23 11:21:44 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2017-10-23 11:21:44 -0400
commit     6fbf248a20d362a92de60beee9474faca0b54eee (patch)
tree       b5365be73b420be3b6036fb40aaf1b167c90b180 /drivers/android/binder_alloc.c
parent     de4ce2d1ad1bb3304d4107160c9551b7fd8d8ec5 (diff)
parent     bb176f67090ca54869fc1262c913aa69d2ede070 (diff)
Merge 4.14-rc6 into char-misc-next
We want the driver fixes in here and this resolves a merge issue with the binder driver.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--  drivers/android/binder_alloc.c  24
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index ed0c9dc792eb..6f6f745605af 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 	}
 
-	if (need_mm)
-		mm = get_task_mm(alloc->tsk);
+	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
+		mm = alloc->vma_vm_mm;
 
 	if (mm) {
 		down_write(&mm->mmap_sem);
 		vma = alloc->vma;
-		if (vma && mm != alloc->vma_vm_mm) {
-			pr_err("%d: vma mm and task mm mismatch\n",
-			       alloc->pid);
-			vma = NULL;
-		}
 	}
 
 	if (!vma && need_mm) {
@@ -565,7 +560,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 			   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
 			   alloc->pid, buffer->data,
-			   prev->data, next->data);
+			   prev->data, next ? next->data : NULL);
 	binder_update_page_range(alloc, 0, buffer_start_page(buffer),
 				 buffer_start_page(buffer) + PAGE_SIZE);
 }
@@ -718,6 +713,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	barrier();
 	alloc->vma = vma;
 	alloc->vma_vm_mm = vma->vm_mm;
+	mmgrab(alloc->vma_vm_mm);
 
 	return 0;
 
@@ -793,6 +789,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		vfree(alloc->buffer);
 	}
 	mutex_unlock(&alloc->mutex);
+	if (alloc->vma_vm_mm)
+		mmdrop(alloc->vma_vm_mm);
 
 	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "%s: %d buffers %d, pages %d\n",
@@ -887,7 +885,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
 	WRITE_ONCE(alloc->vma, NULL);
-	WRITE_ONCE(alloc->vma_vm_mm, NULL);
 }
 
 /**
@@ -924,9 +921,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
 	vma = alloc->vma;
 	if (vma) {
-		mm = get_task_mm(alloc->tsk);
-		if (!mm)
-			goto err_get_task_mm_failed;
+		if (!mmget_not_zero(alloc->vma_vm_mm))
+			goto err_mmget;
+		mm = alloc->vma_vm_mm;
 		if (!down_write_trylock(&mm->mmap_sem))
 			goto err_down_write_mmap_sem_failed;
 	}
@@ -961,7 +958,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 err_down_write_mmap_sem_failed:
 	mmput_async(mm);
-err_get_task_mm_failed:
+err_mmget:
 err_page_already_freed:
 	mutex_unlock(&alloc->mutex);
 err_get_alloc_mutex_failed:
@@ -1000,7 +997,6 @@ static struct shrinker binder_shrinker = {
  */
 void binder_alloc_init(struct binder_alloc *alloc)
 {
-	alloc->tsk = current->group_leader;
 	alloc->pid = current->group_leader->pid;
 	mutex_init(&alloc->mutex);
 	INIT_LIST_HEAD(&alloc->buffers);
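
Note on the conversion above: the patch stops looking up the mm via get_task_mm(alloc->tsk) and instead pins the mm_struct once with mmgrab() when the mapping is set up, takes a temporary user reference with mmget_not_zero() whenever the address space has to be touched, and drops the pin with mmdrop() at teardown. A minimal sketch of that reference pattern follows; it uses a hypothetical foo_dev structure, omits the binder-specific locking and page handling, and is illustrative only, not code from this patch.

/*
 * Illustrative sketch (not from the patch): mm_struct reference pattern
 * against a hypothetical foo_dev, assuming a 4.14-era kernel (mmap_sem).
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

struct foo_dev {
	struct mm_struct *vma_vm_mm;	/* pinned at mmap time */
	struct vm_area_struct *vma;
};

/* mmap handler: remember the mm and pin it so the pointer stays valid. */
static void foo_mmap(struct foo_dev *dev, struct vm_area_struct *vma)
{
	dev->vma = vma;
	dev->vma_vm_mm = vma->vm_mm;
	mmgrab(dev->vma_vm_mm);	/* pins mm_count, not the address space itself */
}

/* Worker that needs the address space, e.g. to map or zap pages. */
static int foo_touch_address_space(struct foo_dev *dev)
{
	struct mm_struct *mm;

	if (!mmget_not_zero(dev->vma_vm_mm))	/* fails once userspace has exited */
		return -ESRCH;
	mm = dev->vma_vm_mm;

	down_write(&mm->mmap_sem);
	/* ... operate on dev->vma / the page tables ... */
	up_write(&mm->mmap_sem);

	mmput(mm);	/* or mmput_async() in shrinker/atomic-averse contexts */
	return 0;
}

/* Final teardown: drop the pin taken in foo_mmap(). */
static void foo_release(struct foo_dev *dev)
{
	if (dev->vma_vm_mm)
		mmdrop(dev->vma_vm_mm);
}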