author		Sherry Yang <sherryy@android.com>	2017-08-23 11:46:42 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-08-28 10:47:17 -0400
commit		f2517eb76f1f2f7f89761f9db2b202e89931738c (patch)
tree		d15ad6ea85358e44e7ff92b7188dfa21e36560d9
parent		74310e06be4d74dcf67cd108366710dee5c576d5 (diff)
android: binder: Add global lru shrinker to binder
Hold on to the pages allocated and mapped for transaction buffers until
the system is under memory pressure. When that happens, use the Linux
shrinker to free pages. Without the shrinker, the patch "android: binder:
Move buffer out of area shared with user space" would cause a significant
slowdown for small transactions that fit into the first page, because the
free-list buffer header used to be inlined with the buffer data.

In addition to preventing the performance regression for small
transactions, this patch improves the performance of transactions that
take up more than one page.

Modify the alloc selftest to work with the shrinker change.

Test: Run memory-intensive applications (Chrome and Camera) to trigger
shrinker callbacks. Binder frees memory as expected.
Test: Run binderThroughputTest with the high memory pressure option
enabled.

Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
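For reference, below is a minimal sketch (not part of the patch; the
example_* names are illustrative only) of the list_lru + shrinker pattern
the change relies on: freed pages are parked on a list_lru, the shrinker's
count callback reports how many are parked, and the scan callback walks
the list to release them under memory pressure.

        #include <linux/init.h>
        #include <linux/list_lru.h>
        #include <linux/shrinker.h>

        static struct list_lru example_lru;

        /* Walk callback: release one parked object and drop it from the lru. */
        static enum lru_status example_free_one(struct list_head *item,
                                                struct list_lru_one *lru,
                                                spinlock_t *lock, void *cb_arg)
        {
                /* Free the object embedding 'item' here (illustrative). */
                list_lru_isolate(lru, item);
                return LRU_REMOVED;
        }

        static unsigned long example_count(struct shrinker *shrink,
                                           struct shrink_control *sc)
        {
                /* How many objects are currently reclaimable. */
                return list_lru_count(&example_lru);
        }

        static unsigned long example_scan(struct shrinker *shrink,
                                          struct shrink_control *sc)
        {
                /* Reclaim up to nr_to_scan parked objects. */
                return list_lru_walk(&example_lru, example_free_one,
                                     NULL, sc->nr_to_scan);
        }

        static struct shrinker example_shrinker = {
                .count_objects = example_count,
                .scan_objects  = example_scan,
                .seeks         = DEFAULT_SEEKS,
        };

        static int __init example_init(void)
        {
                int ret = list_lru_init(&example_lru);

                if (ret)
                        return ret;
                return register_shrinker(&example_shrinker);
        }

The patch follows the same shape: binder_alloc_lru plays the role of
example_lru, and binder_alloc_free_page is the walk callback.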
 drivers/android/binder.c                |   2
 drivers/android/binder_alloc.c          | 172
 drivers/android/binder_alloc.h          |  23
 drivers/android/binder_alloc_selftest.c |  68
 4 files changed, 225 insertions(+), 40 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e4b6055aafc7..ba9e613b42d6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -5243,6 +5243,8 @@ static int __init binder_init(void)
 	struct binder_device *device;
 	struct hlist_node *tmp;
 
+	binder_alloc_shrinker_init();
+
 	atomic_set(&binder_transaction_log.cur, ~0U);
 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
 
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index e96659215f25..11a08bf72bcc 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -27,9 +27,12 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/list_lru.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
+struct list_lru binder_alloc_lru;
+
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 enum {
@@ -188,8 +191,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 {
 	void *page_addr;
 	unsigned long user_page_addr;
-	struct page **page;
-	struct mm_struct *mm;
+	struct binder_lru_page *page;
+	struct mm_struct *mm = NULL;
+	bool need_mm = false;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: %s pages %pK-%pK\n", alloc->pid,
@@ -200,9 +204,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
 	trace_binder_update_page_range(alloc, allocate, start, end);
 
-	if (vma)
-		mm = NULL;
-	else
+	if (allocate == 0)
+		goto free_range;
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+		if (!page->page_ptr) {
+			need_mm = true;
+			break;
+		}
+	}
+
+	if (!vma && need_mm)
 		mm = get_task_mm(alloc->tsk);
 
 	if (mm) {
@@ -215,10 +228,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 	}
 
-	if (allocate == 0)
-		goto free_range;
-
-	if (vma == NULL) {
+	if (!vma && need_mm) {
 		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
 		       alloc->pid);
 		goto err_no_vma;
@@ -226,18 +236,33 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
+		bool on_lru;
 
 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
 
-		BUG_ON(*page);
-		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-		if (*page == NULL) {
+		if (page->page_ptr) {
+			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+			WARN_ON(!on_lru);
+			continue;
+		}
+
+		if (WARN_ON(!vma))
+			goto err_page_ptr_cleared;
+
+		page->page_ptr = alloc_page(GFP_KERNEL |
+					    __GFP_HIGHMEM |
+					    __GFP_ZERO);
+		if (!page->page_ptr) {
 			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
 				alloc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
+		page->alloc = alloc;
+		INIT_LIST_HEAD(&page->lru);
+
 		ret = map_kernel_range_noflush((unsigned long)page_addr,
-					PAGE_SIZE, PAGE_KERNEL, page);
+					       PAGE_SIZE, PAGE_KERNEL,
+					       &page->page_ptr);
 		flush_cache_vmap((unsigned long)page_addr,
 				(unsigned long)page_addr + PAGE_SIZE);
 		if (ret != 1) {
@@ -247,7 +272,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 		user_page_addr =
 			(uintptr_t)page_addr + alloc->user_buffer_offset;
-		ret = vm_insert_page(vma, user_page_addr, page[0]);
+		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
 			       alloc->pid, user_page_addr);
@@ -264,16 +289,21 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 free_range:
 	for (page_addr = end - PAGE_SIZE; page_addr >= start;
 	     page_addr -= PAGE_SIZE) {
+		bool ret;
+
 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
-		if (vma)
-			zap_page_range(vma, (uintptr_t)page_addr +
-				alloc->user_buffer_offset, PAGE_SIZE);
+
+		ret = list_lru_add(&binder_alloc_lru, &page->lru);
+		WARN_ON(!ret);
+		continue;
+
 err_vm_insert_page_failed:
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 err_map_kernel_failed:
-		__free_page(*page);
-		*page = NULL;
+		__free_page(page->page_ptr);
+		page->page_ptr = NULL;
 err_alloc_page_failed:
+err_page_ptr_cleared:
 		;
 	}
 err_no_vma:
@@ -731,16 +761,20 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
 	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 		void *page_addr;
+		bool on_lru;
 
-		if (!alloc->pages[i])
+		if (!alloc->pages[i].page_ptr)
 			continue;
 
+		on_lru = list_lru_del(&binder_alloc_lru,
+				      &alloc->pages[i].lru);
 		page_addr = alloc->buffer + i * PAGE_SIZE;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%s: %d: page %d at %pK not freed\n",
-			     __func__, alloc->pid, i, page_addr);
+			     "%s: %d: page %d at %pK %s\n",
+			     __func__, alloc->pid, i, page_addr,
+			     on_lru ? "on lru" : "active");
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-		__free_page(alloc->pages[i]);
+		__free_page(alloc->pages[i].page_ptr);
 		page_count++;
 	}
 	kfree(alloc->pages);
@@ -817,6 +851,93 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
 }
 
 /**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item:   item to free
+ * @lock:   lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock,
+				       void *cb_arg)
+{
+	struct mm_struct *mm = NULL;
+	struct binder_lru_page *page = container_of(item,
+						    struct binder_lru_page,
+						    lru);
+	struct binder_alloc *alloc;
+	uintptr_t page_addr;
+	size_t index;
+
+	alloc = page->alloc;
+	if (!mutex_trylock(&alloc->mutex))
+		goto err_get_alloc_mutex_failed;
+
+	if (!page->page_ptr)
+		goto err_page_already_freed;
+
+	index = page - alloc->pages;
+	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+	if (alloc->vma) {
+		mm = get_task_mm(alloc->tsk);
+		if (!mm)
+			goto err_get_task_mm_failed;
+		if (!down_write_trylock(&mm->mmap_sem))
+			goto err_down_write_mmap_sem_failed;
+
+		zap_page_range(alloc->vma,
+			       page_addr + alloc->user_buffer_offset,
+			       PAGE_SIZE);
+
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+
+	unmap_kernel_range(page_addr, PAGE_SIZE);
+	__free_page(page->page_ptr);
+	page->page_ptr = NULL;
+
+	list_lru_isolate(lru, item);
+
+	mutex_unlock(&alloc->mutex);
+	return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+	mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+	return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret = list_lru_count(&binder_alloc_lru);
+	return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret;
+
+	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			    NULL, sc->nr_to_scan);
+	return ret;
+}
+
+struct shrinker binder_shrinker = {
+	.count_objects = binder_shrink_count,
+	.scan_objects = binder_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+};
+
+/**
  * binder_alloc_init() - called by binder_open() for per-proc initialization
  * @alloc: binder_alloc for this proc
  *
@@ -830,3 +951,8 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	mutex_init(&alloc->mutex);
 }
 
+void binder_alloc_shrinker_init(void)
+{
+	list_lru_init(&binder_alloc_lru);
+	register_shrinker(&binder_shrinker);
+}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index dd5649bf6469..fa707cc63393 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -21,7 +21,9 @@
 #include <linux/rtmutex.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/list_lru.h>
 
+extern struct list_lru binder_alloc_lru;
 struct binder_transaction;
 
 /**
@@ -61,6 +63,18 @@ struct binder_buffer {
 };
 
 /**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru:      entry in binder_alloc_lru
+ * @alloc:    binder_alloc for a proc
+ */
+struct binder_lru_page {
+	struct list_head lru;
+	struct page *page_ptr;
+	struct binder_alloc *alloc;
+};
+
+/**
  * struct binder_alloc - per-binder proc state for binder allocator
  * @vma:                vm_area_struct passed to mmap_handler
  *                      (invarient after mmap)
@@ -75,8 +89,7 @@ struct binder_buffer {
  * @allocated_buffers: rb tree of allocated buffers sorted by address
  * @free_async_space:  VA space available for async buffers. This is
  *                     initialized at mmap time to 1/2 the full VA space
- * @pages:              array of physical page addresses for each
- *                      page of mmap'd space
+ * @pages:              array of binder_lru_page
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
  *
@@ -96,7 +109,7 @@ struct binder_alloc {
 	struct rb_root free_buffers;
 	struct rb_root allocated_buffers;
 	size_t free_async_space;
-	struct page **pages;
+	struct binder_lru_page *pages;
 	size_t buffer_size;
 	uint32_t buffer_free;
 	int pid;
@@ -107,12 +120,16 @@ void binder_selftest_alloc(struct binder_alloc *alloc);
 #else
 static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
 #endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock, void *cb_arg);
 extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 						  size_t data_size,
 						  size_t offsets_size,
 						  size_t extra_buffers_size,
 						  int is_async);
 extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
 extern struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 0bf72079a9da..8bd7bcef967d 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -109,9 +109,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
 	page_addr = buffer->data;
 	for (; page_addr < end; page_addr += PAGE_SIZE) {
 		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
-		if (!alloc->pages[page_index]) {
-			pr_err("incorrect alloc state at page index %d\n",
-			       page_index);
+		if (!alloc->pages[page_index].page_ptr ||
+		    !list_empty(&alloc->pages[page_index].lru)) {
+			pr_err("expect alloc but is %s at page index %d\n",
+			       alloc->pages[page_index].page_ptr ?
+			       "lru" : "free", page_index);
 			return false;
 		}
 	}
@@ -137,28 +139,63 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
 
 static void binder_selftest_free_buf(struct binder_alloc *alloc,
 				     struct binder_buffer *buffers[],
-				     size_t *sizes, int *seq)
+				     size_t *sizes, int *seq, size_t end)
 {
 	int i;
 
 	for (i = 0; i < BUFFER_NUM; i++)
 		binder_alloc_free_buf(alloc, buffers[seq[i]]);
 
+	for (i = 0; i < end / PAGE_SIZE; i++) {
+		/**
+		 * Error message on a free page can be false positive
+		 * if binder shrinker ran during binder_alloc_free_buf
+		 * calls above.
+		 */
+		if (list_empty(&alloc->pages[i].lru)) {
+			pr_err_size_seq(sizes, seq);
+			pr_err("expect lru but is %s at page index %d\n",
+			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+	int i;
+	unsigned long count;
+
+	while ((count = list_lru_count(&binder_alloc_lru))) {
+		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			      NULL, count);
+	}
+
 	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
-		if ((!alloc->pages[i]) == (i == 0)) {
-			pr_err("incorrect free state at page index %d\n", i);
+		if (alloc->pages[i].page_ptr) {
+			pr_err("expect free but is %s at page index %d\n",
+			       list_empty(&alloc->pages[i].lru) ?
+			       "alloc" : "lru", i);
 			binder_selftest_failures++;
 		}
 	}
 }
 
 static void binder_selftest_alloc_free(struct binder_alloc *alloc,
-				       size_t *sizes, int *seq)
+				       size_t *sizes, int *seq, size_t end)
 {
 	struct binder_buffer *buffers[BUFFER_NUM];
 
 	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-	binder_selftest_free_buf(alloc, buffers, sizes, seq);
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+	/* Allocate from lru. */
+	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+	if (list_lru_count(&binder_alloc_lru))
+		pr_err("lru list should be empty but is not\n");
+
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+	binder_selftest_free_page(alloc);
 }
 
 static bool is_dup(int *seq, int index, int val)
@@ -174,19 +211,20 @@ static bool is_dup(int *seq, int index, int val)
 
 /* Generate BUFFER_NUM factorial free orders. */
 static void binder_selftest_free_seq(struct binder_alloc *alloc,
-				     size_t *sizes, int *seq, int index)
+				     size_t *sizes, int *seq,
+				     int index, size_t end)
 {
 	int i;
 
 	if (index == BUFFER_NUM) {
-		binder_selftest_alloc_free(alloc, sizes, seq);
+		binder_selftest_alloc_free(alloc, sizes, seq, end);
 		return;
 	}
 	for (i = 0; i < BUFFER_NUM; i++) {
 		if (is_dup(seq, index, i))
 			continue;
 		seq[index] = i;
-		binder_selftest_free_seq(alloc, sizes, seq, index + 1);
+		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
 	}
 }
 
@@ -211,8 +249,9 @@ static void binder_selftest_alloc_size(struct binder_alloc *alloc,
 	 * we need one giant buffer before getting to the last page.
 	 */
 	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
-	binder_selftest_free_seq(alloc, front_sizes, seq, 0);
-	binder_selftest_free_seq(alloc, back_sizes, seq, 0);
+	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+				 end_offset[BUFFER_NUM - 1]);
+	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
 }
 
 static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
@@ -246,7 +285,8 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
  *
  * Allocate BUFFER_NUM buffers to cover all page alignment cases,
  * then free them in all orders possible. Check that pages are
- * allocated after buffer alloc and freed after freeing buffer.
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
  */
 void binder_selftest_alloc(struct binder_alloc *alloc)
 {