author		Todd Kjos <tkjos@android.com>	2019-02-08 13:35:18 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-02-12 04:43:57 -0500
commit		880211667b203dd32724f3be224c44c0400aa0a6
tree		ab0537523357c2c341c2b883503d84f7e00c9656 /drivers/android/binder_alloc.c
parent		db6b0b810bf945d1991917ffce0e93383101f2fa
binder: remove kernel vm_area for buffer space
Remove the kernel's vm_area and the code that maps
buffer pages into it.
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
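Background: before this change, every page of binder buffer space was mapped twice: once into a kernel vm_area obtained with get_vm_area(), and once into the target task's user VMA. The two addresses were related by a constant user_buffer_offset captured at mmap time. A minimal sketch of the old arithmetic, using the field names from the diff below (the helper itself is hypothetical):

/* Hypothetical illustration of the old scheme removed below: a buffer
 * page had both a kernel address (inside the vm_area) and a user
 * address, related by a fixed per-mapping offset. */
static uintptr_t example_user_addr(struct binder_alloc *alloc, void *kernel_addr)
{
	/* user_buffer_offset == vma->vm_start - (uintptr_t)alloc->buffer */
	return (uintptr_t)kernel_addr + alloc->user_buffer_offset;
}

After this patch, alloc->buffer is vma->vm_start itself, user_buffer_offset is 0, and the kernel-side mapping no longer exists.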
Diffstat (limited to 'drivers/android/binder_alloc.c')
 drivers/android/binder_alloc.c | 40 ++--------------------------------------
 1 file changed, 2 insertions(+), 38 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2eebff4be83e..d4cbe4b3947a 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -265,16 +265,6 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		page->alloc = alloc;
 		INIT_LIST_HEAD(&page->lru);
 
-		ret = map_kernel_range_noflush((unsigned long)page_addr,
-					       PAGE_SIZE, PAGE_KERNEL,
-					       &page->page_ptr);
-		flush_cache_vmap((unsigned long)page_addr,
-				 (unsigned long)page_addr + PAGE_SIZE);
-		if (ret != 1) {
-			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
-			       alloc->pid, page_addr);
-			goto err_map_kernel_failed;
-		}
 		user_page_addr =
 			(uintptr_t)page_addr + alloc->user_buffer_offset;
 		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
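With the kernel vm_area gone, binder_update_page_range() only has to insert each newly allocated page into the user VMA; there is no second, kernel-side mapping to establish, so the map_kernel_range_noflush()/flush_cache_vmap() step and its err_map_kernel_failed unwind label disappear. When the kernel itself needs to touch a buffer page, it can use a transient per-page mapping instead. A minimal sketch of that pattern (the helper is hypothetical, not part of this patch):

#include <linux/highmem.h>	/* kmap()/kunmap() */

/* Hypothetical illustration: access one buffer page through a
 * short-lived kernel mapping instead of a permanent vm_area. */
static void example_touch_page(struct page *page, size_t offset, u8 val)
{
	u8 *kptr = kmap(page);	/* temporary kernel mapping */

	kptr[offset] = val;
	kunmap(page);		/* dropped immediately after use */
}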
@@ -314,8 +304,6 @@ free_range:
 		continue;
 
 err_vm_insert_page_failed:
-		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
 		__free_page(page->page_ptr);
 		page->page_ptr = NULL;
 err_alloc_page_failed:
@@ -695,7 +683,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 			      struct vm_area_struct *vma)
 {
 	int ret;
-	struct vm_struct *area;
 	const char *failure_string;
 	struct binder_buffer *buffer;
 
@@ -706,28 +693,10 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_already_mapped;
 	}
 
-	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
-	if (area == NULL) {
-		ret = -ENOMEM;
-		failure_string = "get_vm_area";
-		goto err_get_vm_area_failed;
-	}
-	alloc->buffer = area->addr;
-	alloc->user_buffer_offset =
-		vma->vm_start - (uintptr_t)alloc->buffer;
+	alloc->buffer = (void *)vma->vm_start;
+	alloc->user_buffer_offset = 0;
 	mutex_unlock(&binder_alloc_mmap_lock);
 
-#ifdef CONFIG_CPU_CACHE_VIPT
-	if (cache_is_vipt_aliasing()) {
-		while (CACHE_COLOUR(
-			(vma->vm_start ^ (uint32_t)alloc->buffer))) {
-			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
-				__func__, alloc->pid, vma->vm_start,
-				vma->vm_end, alloc->buffer);
-			vma->vm_start += PAGE_SIZE;
-		}
-	}
-#endif
 	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
 			       sizeof(alloc->pages[0]),
 			       GFP_KERNEL);
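Two things fall out of the simplified mmap handler. First, alloc->buffer now holds the user address vma->vm_start directly, so user_buffer_offset is always 0. Second, the CONFIG_CPU_CACHE_VIPT workaround could be dropped: cache-colour aliasing was only a concern while the same physical page was visible at two different virtual addresses, one kernel and one user; with a single user-side mapping there is no second address to alias against. A sketch of what the setup reduces to, reusing the names from the diff (illustrative only):

/* Illustrative only: buffer space is now just the user VMA itself. */
static void example_setup(struct binder_alloc *alloc,
			  struct vm_area_struct *vma)
{
	alloc->buffer = (void *)vma->vm_start;	/* a user VA, not a kernel VA */
	alloc->user_buffer_offset = 0;		/* kernel/user delta is gone */
}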
@@ -760,9 +729,7 @@ err_alloc_buf_struct_failed:
 	alloc->pages = NULL;
 err_alloc_pages_failed:
 	mutex_lock(&binder_alloc_mmap_lock);
-	vfree(alloc->buffer);
 	alloc->buffer = NULL;
-err_get_vm_area_failed:
 err_already_mapped:
 	mutex_unlock(&binder_alloc_mmap_lock);
 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -821,12 +788,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 				     "%s: %d: page %d at %pK %s\n",
 				     __func__, alloc->pid, i, page_addr,
 				     on_lru ? "on lru" : "active");
-			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 			__free_page(alloc->pages[i].page_ptr);
 			page_count++;
 		}
 		kfree(alloc->pages);
-		vfree(alloc->buffer);
 	}
 	mutex_unlock(&alloc->mutex);
 	if (alloc->vma_vm_mm)
@@ -988,7 +953,6 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	trace_binder_unmap_kernel_start(alloc, index);
 
-	unmap_kernel_range(page_addr, PAGE_SIZE);
 	__free_page(page->page_ptr);
 	page->page_ptr = NULL;
 
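In the shrinker path, reclaiming a page now amounts to zapping its user-side mapping (done elsewhere in binder_alloc_free_page(), outside this hunk) and freeing the page itself; the trace_binder_unmap_kernel_start() tracepoint survives even though the kernel unmap it used to announce is gone. A sketch of the per-page teardown after this patch, assuming struct binder_lru_page as used earlier in the diff:

/* Illustration only: the page is released directly; no
 * unmap_kernel_range() step remains between trace and free. */
static void example_release_page(struct binder_lru_page *page)
{
	__free_page(page->page_ptr);
	page->page_ptr = NULL;
}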