path: root/drivers/android/binder_alloc_selftest.c
author    Sherry Yang <sherryy@android.com>  2017-08-23 11:46:41 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-08-28 10:47:17 -0400
commit    74310e06be4d74dcf67cd108366710dee5c576d5 (patch)
tree      57be21314e48f8b30bdd9831eb6a0bb252808bc9 /drivers/android/binder_alloc_selftest.c
parent    4175e2b46fd4b9021ef81f18f1be9474b2f45d4a (diff)
android: binder: Move buffer out of area shared with user space
Binder driver allocates buffer meta data in a region that is mapped in user space. These meta data contain pointers in the kernel.

This patch allocates buffer meta data on the kernel heap that is not mapped in user space, and uses a pointer to refer to the data mapped.

Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
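As a rough, hypothetical sketch of the layout change described above (the struct and function names below are illustrative assumptions, not the driver's actual definitions): the buffer metadata is kzalloc()ed on the kernel heap, and only its data pointer refers into the area that is mmap()ed by user space.

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

/*
 * Hypothetical sketch, not the driver's real struct binder_buffer:
 * the metadata object lives on the kernel heap and only ->data
 * points into the area shared with user space.
 */
struct buffer_meta_sketch {
	struct list_head entry;		/* allocator's buffer list */
	struct rb_node rb_node;		/* free/allocated rb-tree linkage */
	size_t data_size;		/* payload size in the mapped area */
	void *data;			/* start of payload in the mapped area */
};

static struct buffer_meta_sketch *buffer_meta_alloc(void *mapped, size_t size)
{
	/* metadata never shares pages with user space */
	struct buffer_meta_sketch *buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);

	if (!buffer)
		return NULL;
	buffer->data = mapped;		/* only the payload is user-visible */
	buffer->data_size = size;
	return buffer;
}

With the metadata out of the shared area, the selftest below no longer has to reserve sizeof(struct binder_buffer) per buffer when computing buffer sizes, which is what the second and third hunks adjust.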
Diffstat (limited to 'drivers/android/binder_alloc_selftest.c')
-rw-r--r--  drivers/android/binder_alloc_selftest.c  11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index cc00ab6ee29d..0bf72079a9da 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -105,8 +105,9 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
 	void *page_addr, *end;
 	int page_index;
 
-	end = (void *)PAGE_ALIGN((uintptr_t)buffer + size);
-	for (page_addr = buffer; page_addr < end; page_addr += PAGE_SIZE) {
+	end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+	page_addr = buffer->data;
+	for (; page_addr < end; page_addr += PAGE_SIZE) {
 		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		if (!alloc->pages[page_index]) {
 			pr_err("incorrect alloc state at page index %d\n",
@@ -209,8 +210,7 @@ static void binder_selftest_alloc_size(struct binder_alloc *alloc,
 	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
 	 * we need one giant buffer before getting to the last page.
 	 */
-	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]
-		- sizeof(struct binder_buffer) * BUFFER_NUM;
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
 	binder_selftest_free_seq(alloc, front_sizes, seq, 0);
 	binder_selftest_free_seq(alloc, back_sizes, seq, 0);
 }
@@ -228,8 +228,7 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
 	prev = index == 0 ? 0 : end_offset[index - 1];
 	end = prev;
 
-	BUILD_BUG_ON((BUFFER_MIN_SIZE + sizeof(struct binder_buffer))
-		     * BUFFER_NUM >= PAGE_SIZE);
+	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
 
 	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
 		if (align % 2)