Diffstat (limited to 'drivers/android/binder_alloc.c')
 drivers/android/binder_alloc.c | 43 +++++++++++++++++++++++++++++++++++--------
 1 file changed, 35 insertions(+), 8 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3f3b7b253445..64fd96eada31 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -332,6 +332,35 @@ err_no_vma:
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+		struct vm_area_struct *vma)
+{
+	if (vma)
+		alloc->vma_vm_mm = vma->vm_mm;
+	/*
+	 * If we see alloc->vma is not NULL, the buffer data structures are set
+	 * up completely. Look at the smp_rmb() side in binder_alloc_get_vma().
+	 * We also want to guarantee the new alloc->vma_vm_mm is always visible
+	 * if alloc->vma is set.
+	 */
+	smp_wmb();
+	alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+		struct binder_alloc *alloc)
+{
+	struct vm_area_struct *vma = NULL;
+
+	if (alloc->vma) {
+		/* Look at the description in binder_alloc_set_vma() */
+		smp_rmb();
+		vma = alloc->vma;
+	}
+	return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
 				struct binder_alloc *alloc,
 				size_t data_size,
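The helper pair above centralises a standard publication pattern: the writer fully initialises everything the pointer will lead to, issues smp_wmb(), and only then stores the pointer; a reader that observes the pointer issues smp_rmb() before trusting anything reached through it. The snippet below is a minimal stand-alone userspace sketch of that pattern, not kernel code: the names are invented, and C11 fences stand in for smp_wmb()/smp_rmb().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct payload {
	int value;				/* stands in for the buffer bookkeeping */
};

static struct payload slot;
static _Atomic(struct payload *) published;	/* plays the role of alloc->vma */

static void *writer(void *arg)
{
	slot.value = 42;			/* "buffer data structures set up completely" */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&published, &slot, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	struct payload *p;

	while (!(p = atomic_load_explicit(&published, memory_order_relaxed)))
		;				/* wait until the pointer becomes visible */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	printf("reader sees %d\n", p->value);	/* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Keeping both barriers inside one helper pair means every writer and reader of alloc->vma inherits the ordering automatically instead of repeating it at each call site.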
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
-	if (alloc->vma == NULL) {
+	if (!binder_alloc_get_vma(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	barrier();
-	alloc->vma = vma;
-	alloc->vma_vm_mm = vma->vm_mm;
+	binder_alloc_set_vma(alloc, vma);
 	mmgrab(alloc->vma_vm_mm);
 
 	return 0;
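The mmap path now publishes through the helper. The removed sequence used barrier(), which only constrains the compiler, and it stored alloc->vma before alloc->vma_vm_mm, so a reader could observe the vma pointer while vma_vm_mm was still stale. The sketch below is an illustrative userspace C11 analogue with invented names (fake_alloc, publish_old, publish_new), not the kernel code: atomic_signal_fence() plays the role of barrier(), a release fence the role of smp_wmb().

#include <stdatomic.h>

/* Hypothetical stand-ins for the binder_alloc fields involved. */
struct fake_alloc {
	_Atomic(void *) vma;	/* published last in the fixed version */
	void *vma_vm_mm;	/* must be visible whenever vma is seen */
};

/* Mirrors the removed code: compiler-only fence, vma stored before vma_vm_mm. */
static void publish_old(struct fake_alloc *a, void *vma, void *mm)
{
	atomic_signal_fence(memory_order_seq_cst);	/* ~ barrier(): compiler only */
	atomic_store_explicit(&a->vma, vma, memory_order_relaxed);
	a->vma_vm_mm = mm;	/* a reader may see vma set while this store is not yet visible */
}

/* Mirrors binder_alloc_set_vma(): dependent field first, then a real release. */
static void publish_new(struct fake_alloc *a, void *vma, void *mm)
{
	a->vma_vm_mm = mm;
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&a->vma, vma, memory_order_relaxed);
	/* a reader that sees vma (after its acquire/rmb) also sees vma_vm_mm */
}

int main(void)
{
	static struct fake_alloc a;
	static int dummy_vma, dummy_mm;

	publish_old(&a, &dummy_vma, &dummy_mm);	/* racy ordering */
	publish_new(&a, &dummy_vma, &dummy_mm);	/* safe ordering */
	return 0;
}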
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	int buffers, page_count;
 	struct binder_buffer *buffer;
 
-	BUG_ON(alloc->vma);
-
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
+	BUG_ON(alloc->vma);
+
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	WRITE_ONCE(alloc->vma, NULL);
+	binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	vma = alloc->vma;
+	vma = binder_alloc_get_vma(alloc);
 	if (vma) {
 		if (!mmget_not_zero(alloc->vma_vm_mm))
 			goto err_mmget;
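After binder_alloc_get_vma() returns a non-NULL vma, the shrinker path still takes its mm reference with mmget_not_zero(), which succeeds only while the mm's user count has not already reached zero. Below is a rough userspace sketch of that "take a reference only if still live" idiom under invented names (fake_mm, fake_mmget_not_zero); it is an analogy, not the mm_struct implementation.

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-in for the mm_users counter of an mm_struct. */
struct fake_mm {
	atomic_int users;
};

/*
 * Take a reference only if the object still has users, the pattern
 * mmget_not_zero() follows: never resurrect an mm whose last user
 * has already dropped it.
 */
static bool fake_mmget_not_zero(struct fake_mm *mm)
{
	int old = atomic_load(&mm->users);

	while (old != 0) {
		/* atomically bump users from old to old + 1 */
		if (atomic_compare_exchange_weak(&mm->users, &old, old + 1))
			return true;	/* reference taken, object stays live */
	}
	return false;			/* already being torn down, caller bails out */
}

static void fake_mmput(struct fake_mm *mm)
{
	if (atomic_fetch_sub(&mm->users, 1) == 1) {
		/* last reference gone: tear the object down here */
	}
}

int main(void)
{
	struct fake_mm mm = { .users = 1 };

	if (fake_mmget_not_zero(&mm))	/* succeeds: users was non-zero */
		fake_mmput(&mm);	/* back to one reference */
	fake_mmput(&mm);		/* drops the last reference */
	return 0;
}

Per the new helper's comment, a caller that saw a non-NULL vma via binder_alloc_get_vma() is also guaranteed to see the alloc->vma_vm_mm that was published with it, so the mmget_not_zero() call never acts on a stale pointer.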