author:    Sherry Yang <sherryy@android.com>  2017-08-23 11:46:42 -0400
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-08-28 10:47:17 -0400
commit:    f2517eb76f1f2f7f89761f9db2b202e89931738c
tree:      d15ad6ea85358e44e7ff92b7188dfa21e36560d9 /drivers/android/binder_alloc_selftest.c
parent:    74310e06be4d74dcf67cd108366710dee5c576d5
android: binder: Add global lru shrinker to binder
Hold on to the pages allocated and mapped for transaction buffers until
the system is under memory pressure. When that happens, use the Linux
shrinker to free pages. Without the shrinker, the patch "android:
binder: Move buffer out of area shared with user space" would cause a
significant slowdown for small transactions that fit into the first
page, because the free-list buffer header used to be inlined with the
buffer data.

In addition to preventing that performance regression for small
transactions, this patch improves the performance of transactions that
take up more than one page.

Modify the alloc selftest to work with the shrinker change.

Test: Run memory-intensive applications (Chrome and Camera) to trigger
shrinker callbacks. Binder frees memory as expected.
Test: Run binderThroughputTest with the high-memory-pressure option
enabled.

Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
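For orientation: the shrinker half of this change lives in binder_alloc.c, outside this diffstat. The selftest below references its symbols: binder_alloc_lru, a global list_lru holding pages whose buffers have been freed, and binder_alloc_free_page, the walk callback that unmaps and frees one page. A minimal sketch of that wiring, assuming the 4.x-era shrinker and list_lru APIs (this is an illustration, not a quote of the patch):

	#include <linux/list_lru.h>
	#include <linux/shrinker.h>

	struct list_lru binder_alloc_lru;	/* pages kept resident after buffer free */

	/* Walk callback (defined in binder_alloc.c): unmaps and frees one page. */
	extern enum lru_status binder_alloc_free_page(struct list_head *item,
						      struct list_lru_one *lru,
						      spinlock_t *lock, void *cb_arg);

	static unsigned long binder_shrink_count(struct shrinker *shrink,
						 struct shrink_control *sc)
	{
		/* Report how many binder pages are currently reclaimable. */
		return list_lru_count(&binder_alloc_lru);
	}

	static unsigned long binder_shrink_scan(struct shrinker *shrink,
						struct shrink_control *sc)
	{
		/* Reclaim up to nr_to_scan idle pages off the global lru. */
		return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
				     NULL, sc->nr_to_scan);
	}

	static struct shrinker binder_shrinker = {
		.count_objects = binder_shrink_count,
		.scan_objects  = binder_shrink_scan,
		.seeks         = DEFAULT_SEEKS,
	};

Until memory pressure triggers the scan callback, a freed transaction buffer keeps its pages mapped, which is what makes re-allocating a small buffer nearly free; the selftest's "Allocate from lru" pass exercises exactly that path.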
Diffstat (limited to 'drivers/android/binder_alloc_selftest.c')
 drivers/android/binder_alloc_selftest.c | 68 ++++++++++++++++++---------
 1 file changed, 54 insertions(+), 14 deletions(-)
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 0bf72079a9da..8bd7bcef967d 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -109,9 +109,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
 	page_addr = buffer->data;
 	for (; page_addr < end; page_addr += PAGE_SIZE) {
 		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
-		if (!alloc->pages[page_index]) {
-			pr_err("incorrect alloc state at page index %d\n",
-			       page_index);
+		if (!alloc->pages[page_index].page_ptr ||
+		    !list_empty(&alloc->pages[page_index].lru)) {
+			pr_err("expect alloc but is %s at page index %d\n",
+			       alloc->pages[page_index].page_ptr ?
+			       "lru" : "free", page_index);
 			return false;
 		}
 	}
@@ -137,28 +139,63 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
 
 static void binder_selftest_free_buf(struct binder_alloc *alloc,
 				     struct binder_buffer *buffers[],
-				     size_t *sizes, int *seq)
+				     size_t *sizes, int *seq, size_t end)
 {
 	int i;
 
 	for (i = 0; i < BUFFER_NUM; i++)
 		binder_alloc_free_buf(alloc, buffers[seq[i]]);
 
+	for (i = 0; i < end / PAGE_SIZE; i++) {
+		/**
+		 * Error message on a free page can be false positive
+		 * if binder shrinker ran during binder_alloc_free_buf
+		 * calls above.
+		 */
+		if (list_empty(&alloc->pages[i].lru)) {
+			pr_err_size_seq(sizes, seq);
+			pr_err("expect lru but is %s at page index %d\n",
+			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+	int i;
+	unsigned long count;
+
+	while ((count = list_lru_count(&binder_alloc_lru))) {
+		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			      NULL, count);
+	}
+
 	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
-		if ((!alloc->pages[i]) == (i == 0)) {
-			pr_err("incorrect free state at page index %d\n", i);
+		if (alloc->pages[i].page_ptr) {
+			pr_err("expect free but is %s at page index %d\n",
+			       list_empty(&alloc->pages[i].lru) ?
+			       "alloc" : "lru", i);
 			binder_selftest_failures++;
 		}
 	}
 }
 
 static void binder_selftest_alloc_free(struct binder_alloc *alloc,
-				       size_t *sizes, int *seq)
+				       size_t *sizes, int *seq, size_t end)
 {
 	struct binder_buffer *buffers[BUFFER_NUM];
 
 	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-	binder_selftest_free_buf(alloc, buffers, sizes, seq);
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+	/* Allocate from lru. */
+	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+	if (list_lru_count(&binder_alloc_lru))
+		pr_err("lru list should be empty but is not\n");
+
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+	binder_selftest_free_page(alloc);
 }
 
 static bool is_dup(int *seq, int index, int val)
@@ -174,19 +211,20 @@ static bool is_dup(int *seq, int index, int val)
 
 /* Generate BUFFER_NUM factorial free orders. */
 static void binder_selftest_free_seq(struct binder_alloc *alloc,
-				     size_t *sizes, int *seq, int index)
+				     size_t *sizes, int *seq,
+				     int index, size_t end)
 {
 	int i;
 
 	if (index == BUFFER_NUM) {
-		binder_selftest_alloc_free(alloc, sizes, seq);
+		binder_selftest_alloc_free(alloc, sizes, seq, end);
 		return;
 	}
 	for (i = 0; i < BUFFER_NUM; i++) {
 		if (is_dup(seq, index, i))
 			continue;
 		seq[index] = i;
-		binder_selftest_free_seq(alloc, sizes, seq, index + 1);
+		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
 	}
 }
 
@@ -211,8 +249,9 @@ static void binder_selftest_alloc_size(struct binder_alloc *alloc,
 	 * we need one giant buffer before getting to the last page.
 	 */
 	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
-	binder_selftest_free_seq(alloc, front_sizes, seq, 0);
-	binder_selftest_free_seq(alloc, back_sizes, seq, 0);
+	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+				 end_offset[BUFFER_NUM - 1]);
+	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
 }
 
 static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
@@ -246,7 +285,8 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
  *
  * Allocate BUFFER_NUM buffers to cover all page alignment cases,
  * then free them in all orders possible. Check that pages are
- * allocated after buffer alloc and freed after freeing buffer.
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
  */
 void binder_selftest_alloc(struct binder_alloc *alloc)
 {
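The new error strings name the three states a binder page can now occupy: "free" (no page backing the address), "alloc" (page mapped and backing a live buffer), and "lru" (page mapped but idle and reclaimable). Assuming the per-page bookkeeping this series adds in binder_alloc.h (a struct pairing a page pointer with an lru list head; binder_lru_page in the actual series), the checks above reduce to a classification like this hypothetical helper:

	/* Per-page state, named as the selftest error messages report it. */
	enum binder_page_state {
		BINDER_PAGE_FREE,	/* page_ptr == NULL: nothing to reclaim */
		BINDER_PAGE_ALLOC,	/* mapped, off the lru: backs a live buffer */
		BINDER_PAGE_LRU,	/* mapped, on the lru: idle, reclaimable */
	};

	static enum binder_page_state page_state(struct binder_lru_page *page)
	{
		if (!page->page_ptr)
			return BINDER_PAGE_FREE;
		return list_empty(&page->lru) ? BINDER_PAGE_ALLOC
					      : BINDER_PAGE_LRU;
	}

check_buffer_pages_allocated() insists on the alloc state while a buffer is live, binder_selftest_free_buf() expects every page up to end to be on the lru once the buffers are returned, and binder_selftest_free_page() drains the lru through binder_alloc_free_page() and then expects every page to be free.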