author     Sherry Yang <sherryy@android.com>                2017-08-23 11:46:42 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-08-28 10:47:17 -0400
commit     f2517eb76f1f2f7f89761f9db2b202e89931738c
tree       d15ad6ea85358e44e7ff92b7188dfa21e36560d9  /drivers/android/binder_alloc.c
parent     74310e06be4d74dcf67cd108366710dee5c576d5
android: binder: Add global lru shrinker to binder
Hold on to the pages allocated and mapped for transaction buffers until
the system is under memory pressure. When that happens, use a Linux
shrinker to free the pages. Without the shrinker, the patch "android:
binder: Move buffer out of area shared with user space" would cause a
significant slowdown for small transactions that fit into the first page,
because the free-list buffer header used to be inlined with the buffer
data.

In addition to preventing that performance regression for small
transactions, this patch improves performance for transactions that take
up more than one page.

Modify the alloc selftest to work with the shrinker change.

Test: Run memory-intensive applications (Chrome and Camera) to trigger
shrinker callbacks. Binder frees memory as expected.
Test: Run binderThroughputTest with the high-memory-pressure option
enabled.

Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/android/binder_alloc.c')
 drivers/android/binder_alloc.c | 172
 1 file changed, 149 insertions(+), 23 deletions(-)
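
This diff is limited to binder_alloc.c, but the hunks below rely on a per-page
bookkeeping struct that the same patch adds to binder_alloc.h. A sketch of its
shape, inferred from how the fields are used in binder_update_page_range()
below (this is not an excerpt of the actual header change):

/*
 * Sketch of the per-page state assumed by this file; the real definition
 * lives in the companion binder_alloc.h change, which is not shown here.
 */
struct binder_lru_page {
	struct list_head lru;		/* entry on the global binder_alloc_lru when reclaimable */
	struct page *page_ptr;		/* backing page; NULL once the shrinker has freed it */
	struct binder_alloc *alloc;	/* owning per-process allocator, used by the shrinker */
};

With this change, alloc->pages becomes an array of these structs (one per page
of the mmap'ed buffer) rather than an array of struct page pointers, which is
why the hunks below index alloc->pages[...] and dereference .page_ptr.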
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index e96659215f25..11a08bf72bcc 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -27,9 +27,12 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/list_lru.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
+struct list_lru binder_alloc_lru;
+
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 enum {
@@ -188,8 +191,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 {
 	void *page_addr;
 	unsigned long user_page_addr;
-	struct page **page;
-	struct mm_struct *mm;
+	struct binder_lru_page *page;
+	struct mm_struct *mm = NULL;
+	bool need_mm = false;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: %s pages %pK-%pK\n", alloc->pid,
@@ -200,9 +204,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
 	trace_binder_update_page_range(alloc, allocate, start, end);
 
-	if (vma)
-		mm = NULL;
-	else
+	if (allocate == 0)
+		goto free_range;
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+		if (!page->page_ptr) {
+			need_mm = true;
+			break;
+		}
+	}
+
+	if (!vma && need_mm)
 		mm = get_task_mm(alloc->tsk);
 
 	if (mm) {
@@ -215,10 +228,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 	}
 
-	if (allocate == 0)
-		goto free_range;
-
-	if (vma == NULL) {
+	if (!vma && need_mm) {
 		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
 		       alloc->pid);
 		goto err_no_vma;
@@ -226,18 +236,33 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
+		bool on_lru;
 
 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
 
-		BUG_ON(*page);
-		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-		if (*page == NULL) {
+		if (page->page_ptr) {
+			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+			WARN_ON(!on_lru);
+			continue;
+		}
+
+		if (WARN_ON(!vma))
+			goto err_page_ptr_cleared;
+
+		page->page_ptr = alloc_page(GFP_KERNEL |
+					    __GFP_HIGHMEM |
+					    __GFP_ZERO);
+		if (!page->page_ptr) {
 			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
 				alloc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
+		page->alloc = alloc;
+		INIT_LIST_HEAD(&page->lru);
+
 		ret = map_kernel_range_noflush((unsigned long)page_addr,
-					       PAGE_SIZE, PAGE_KERNEL, page);
+					       PAGE_SIZE, PAGE_KERNEL,
+					       &page->page_ptr);
 		flush_cache_vmap((unsigned long)page_addr,
 				 (unsigned long)page_addr + PAGE_SIZE);
 		if (ret != 1) {
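
The WARN_ON()s in this hunk and in the free_range path below lean on the return
values of the list_lru helpers from <linux/list_lru.h>. For reference (existing
kernel prototypes of this era, shown here for context, not part of the patch):

/* Returns true if the item was actually added (it was not already on a list). */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/* Returns true if the item was actually removed (it was on the list). */
bool list_lru_del(struct list_lru *lru, struct list_head *item);

A page that still has page_ptr set is expected to be parked on binder_alloc_lru,
so a failed list_lru_del() indicates an accounting bug, hence WARN_ON(!on_lru).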
@@ -247,7 +272,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 		user_page_addr =
 			(uintptr_t)page_addr + alloc->user_buffer_offset;
-		ret = vm_insert_page(vma, user_page_addr, page[0]);
+		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
 			       alloc->pid, user_page_addr);
@@ -264,16 +289,21 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 free_range:
 	for (page_addr = end - PAGE_SIZE; page_addr >= start;
 	     page_addr -= PAGE_SIZE) {
+		bool ret;
+
 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
-		if (vma)
-			zap_page_range(vma, (uintptr_t)page_addr +
-				alloc->user_buffer_offset, PAGE_SIZE);
+
+		ret = list_lru_add(&binder_alloc_lru, &page->lru);
+		WARN_ON(!ret);
+		continue;
+
 err_vm_insert_page_failed:
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 err_map_kernel_failed:
-		__free_page(*page);
-		*page = NULL;
+		__free_page(page->page_ptr);
+		page->page_ptr = NULL;
 err_alloc_page_failed:
+err_page_ptr_cleared:
 		;
 	}
 err_no_vma:
@@ -731,16 +761,20 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
 	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 		void *page_addr;
+		bool on_lru;
 
-		if (!alloc->pages[i])
+		if (!alloc->pages[i].page_ptr)
 			continue;
 
+		on_lru = list_lru_del(&binder_alloc_lru,
+				      &alloc->pages[i].lru);
 		page_addr = alloc->buffer + i * PAGE_SIZE;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%s: %d: page %d at %pK not freed\n",
-			     __func__, alloc->pid, i, page_addr);
+			     "%s: %d: page %d at %pK %s\n",
+			     __func__, alloc->pid, i, page_addr,
+			     on_lru ? "on lru" : "active");
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-		__free_page(alloc->pages[i]);
+		__free_page(alloc->pages[i].page_ptr);
 		page_count++;
 	}
 	kfree(alloc->pages);
@@ -817,6 +851,93 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
 }
 
 /**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item:   item to free
+ * @lock:   lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock,
+				       void *cb_arg)
+{
+	struct mm_struct *mm = NULL;
+	struct binder_lru_page *page = container_of(item,
+						    struct binder_lru_page,
+						    lru);
+	struct binder_alloc *alloc;
+	uintptr_t page_addr;
+	size_t index;
+
+	alloc = page->alloc;
+	if (!mutex_trylock(&alloc->mutex))
+		goto err_get_alloc_mutex_failed;
+
+	if (!page->page_ptr)
+		goto err_page_already_freed;
+
+	index = page - alloc->pages;
+	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+	if (alloc->vma) {
+		mm = get_task_mm(alloc->tsk);
+		if (!mm)
+			goto err_get_task_mm_failed;
+		if (!down_write_trylock(&mm->mmap_sem))
+			goto err_down_write_mmap_sem_failed;
+
+		zap_page_range(alloc->vma,
+			       page_addr + alloc->user_buffer_offset,
+			       PAGE_SIZE);
+
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+
+	unmap_kernel_range(page_addr, PAGE_SIZE);
+	__free_page(page->page_ptr);
+	page->page_ptr = NULL;
+
+	list_lru_isolate(lru, item);
+
+	mutex_unlock(&alloc->mutex);
+	return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+	mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+	return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret = list_lru_count(&binder_alloc_lru);
+	return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret;
+
+	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			    NULL, sc->nr_to_scan);
+	return ret;
+}
+
+struct shrinker binder_shrinker = {
+	.count_objects = binder_shrink_count,
+	.scan_objects = binder_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+};
+
+/**
  * binder_alloc_init() - called by binder_open() for per-proc initialization
  * @alloc: binder_alloc for this proc
  *
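
binder_alloc_free_page() is written against the list_lru_walk() callback
contract from <linux/list_lru.h>. For context, the relevant pieces of that
existing kernel API (of this era; not something added by the patch):

/*
 * Each item visited by list_lru_walk() is passed to a callback of this type;
 * binder_alloc_free_page() above matches it. The return value tells the walk
 * what happened to the item:
 *   LRU_REMOVED - the callback isolated and freed the item (hence the
 *                 list_lru_isolate() call before returning it)
 *   LRU_SKIP    - the item could not be handled this time (here: a trylock
 *                 failed or the page was already freed)
 */
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
					    struct list_lru_one *list,
					    spinlock_t *lock, void *cb_arg);

binder_shrink_scan() then returns how many pages the walk reclaimed, which is
what the shrinker core expects from .scan_objects, while binder_shrink_count()
reports the number of reclaimable pages via list_lru_count().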
@@ -830,3 +951,8 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	mutex_init(&alloc->mutex);
 }
 
+void binder_alloc_shrinker_init(void)
+{
+	list_lru_init(&binder_alloc_lru);
+	register_shrinker(&binder_shrinker);
+}
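
Nothing in this file calls binder_alloc_shrinker_init(); the companion
binder.c change (part of the same patch but outside this diff, which is
limited to binder_alloc.c) is expected to invoke it once at module init.
A rough sketch of that call site, with the existing init work elided:

/*
 * Hypothetical sketch of the caller in binder.c -- not shown in this diff.
 * The lru and shrinker must be set up before any process can mmap a binder
 * buffer, so module init is the natural place.
 */
static int __init binder_init(void)
{
	binder_alloc_shrinker_init();

	/* ... existing debugfs and misc-device registration elided ... */

	return 0;
}

Note that list_lru_init() and register_shrinker() can both fail; the patch as
shown ignores their return values, which is why binder_alloc_shrinker_init()
is declared void.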