diff options
author:    Todd Kjos <tkjos@android.com>    2019-02-08 13:35:19 -0500
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2019-02-12 04:43:57 -0500
commit:    c41358a5f5217abd7c051e8d42397e5b80f3b3ed (patch)
tree:      2d6de5a71627556a615ef15676a514686cc1c81b
parent:    880211667b203dd32724f3be224c44c0400aa0a6 (diff)
binder: remove user_buffer_offset
Remove user_buffer_offset since there is no kernel
buffer pointer anymore.
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 drivers/android/binder.c       | 39 (-rw-r--r--)
 drivers/android/binder_alloc.c | 16 (-rw-r--r--)
 drivers/android/binder_alloc.h | 23 (-rw-r--r--)
 3 files changed, 13 insertions(+), 65 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index db0c45bc9134..ddd692cbcd7a 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -2380,7 +2380,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, | |||
2380 | struct binder_fd_array_object *fda; | 2380 | struct binder_fd_array_object *fda; |
2381 | struct binder_buffer_object *parent; | 2381 | struct binder_buffer_object *parent; |
2382 | struct binder_object ptr_object; | 2382 | struct binder_object ptr_object; |
2383 | uintptr_t parent_buffer; | ||
2384 | u32 *fd_array; | 2383 | u32 *fd_array; |
2385 | size_t fd_index; | 2384 | size_t fd_index; |
2386 | binder_size_t fd_buf_size; | 2385 | binder_size_t fd_buf_size; |
@@ -2405,14 +2404,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, | |||
2405 | debug_id); | 2404 | debug_id); |
2406 | continue; | 2405 | continue; |
2407 | } | 2406 | } |
2408 | /* | ||
2409 | * Since the parent was already fixed up, convert it | ||
2410 | * back to kernel address space to access it | ||
2411 | */ | ||
2412 | parent_buffer = parent->buffer - | ||
2413 | binder_alloc_get_user_buffer_offset( | ||
2414 | &proc->alloc); | ||
2415 | |||
2416 | fd_buf_size = sizeof(u32) * fda->num_fds; | 2407 | fd_buf_size = sizeof(u32) * fda->num_fds; |
2417 | if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { | 2408 | if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { |
2418 | pr_err("transaction release %d invalid number of fds (%lld)\n", | 2409 | pr_err("transaction release %d invalid number of fds (%lld)\n", |
@@ -2426,7 +2417,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, | |||
2426 | debug_id, (u64)fda->num_fds); | 2417 | debug_id, (u64)fda->num_fds); |
2427 | continue; | 2418 | continue; |
2428 | } | 2419 | } |
2429 | fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); | 2420 | fd_array = (u32 *)(uintptr_t) |
2421 | (parent->buffer + fda->parent_offset); | ||
2430 | for (fd_index = 0; fd_index < fda->num_fds; | 2422 | for (fd_index = 0; fd_index < fda->num_fds; |
2431 | fd_index++) { | 2423 | fd_index++) { |
2432 | u32 fd; | 2424 | u32 fd; |
@@ -2646,7 +2638,6 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, | |||
2646 | struct binder_transaction *in_reply_to) | 2638 | struct binder_transaction *in_reply_to) |
2647 | { | 2639 | { |
2648 | binder_size_t fdi, fd_buf_size; | 2640 | binder_size_t fdi, fd_buf_size; |
2649 | uintptr_t parent_buffer; | ||
2650 | u32 *fd_array; | 2641 | u32 *fd_array; |
2651 | struct binder_proc *proc = thread->proc; | 2642 | struct binder_proc *proc = thread->proc; |
2652 | struct binder_proc *target_proc = t->to_proc; | 2643 | struct binder_proc *target_proc = t->to_proc; |
@@ -2664,13 +2655,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, | |||
2664 | proc->pid, thread->pid, (u64)fda->num_fds); | 2655 | proc->pid, thread->pid, (u64)fda->num_fds); |
2665 | return -EINVAL; | 2656 | return -EINVAL; |
2666 | } | 2657 | } |
2667 | /* | 2658 | fd_array = (u32 *)(uintptr_t)(parent->buffer + fda->parent_offset); |
2668 | * Since the parent was already fixed up, convert it | ||
2669 | * back to the kernel address space to access it | ||
2670 | */ | ||
2671 | parent_buffer = parent->buffer - | ||
2672 | binder_alloc_get_user_buffer_offset(&target_proc->alloc); | ||
2673 | fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); | ||
2674 | if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { | 2659 | if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { |
2675 | binder_user_error("%d:%d parent offset not aligned correctly.\n", | 2660 | binder_user_error("%d:%d parent offset not aligned correctly.\n", |
2676 | proc->pid, thread->pid); | 2661 | proc->pid, thread->pid); |
@@ -2703,7 +2688,6 @@ static int binder_fixup_parent(struct binder_transaction *t, | |||
2703 | binder_size_t last_fixup_min_off) | 2688 | binder_size_t last_fixup_min_off) |
2704 | { | 2689 | { |
2705 | struct binder_buffer_object *parent; | 2690 | struct binder_buffer_object *parent; |
2706 | u8 *parent_buffer; | ||
2707 | struct binder_buffer *b = t->buffer; | 2691 | struct binder_buffer *b = t->buffer; |
2708 | struct binder_proc *proc = thread->proc; | 2692 | struct binder_proc *proc = thread->proc; |
2709 | struct binder_proc *target_proc = t->to_proc; | 2693 | struct binder_proc *target_proc = t->to_proc; |
@@ -2739,11 +2723,8 @@ static int binder_fixup_parent(struct binder_transaction *t, | |||
2739 | proc->pid, thread->pid); | 2723 | proc->pid, thread->pid); |
2740 | return -EINVAL; | 2724 | return -EINVAL; |
2741 | } | 2725 | } |
2742 | parent_buffer = (u8 *)((uintptr_t)parent->buffer - | ||
2743 | binder_alloc_get_user_buffer_offset( | ||
2744 | &target_proc->alloc)); | ||
2745 | buffer_offset = bp->parent_offset + | 2726 | buffer_offset = bp->parent_offset + |
2746 | (uintptr_t)parent_buffer - (uintptr_t)b->data; | 2727 | (uintptr_t)parent->buffer - (uintptr_t)b->data; |
2747 | binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, | 2728 | binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, |
2748 | &bp->buffer, sizeof(bp->buffer)); | 2729 | &bp->buffer, sizeof(bp->buffer)); |
2749 | 2730 | ||
@@ -3159,10 +3140,8 @@ static void binder_transaction(struct binder_proc *proc, | |||
3159 | ALIGN(tr->offsets_size, sizeof(void *)) + | 3140 | ALIGN(tr->offsets_size, sizeof(void *)) + |
3160 | ALIGN(extra_buffers_size, sizeof(void *)) - | 3141 | ALIGN(extra_buffers_size, sizeof(void *)) - |
3161 | ALIGN(secctx_sz, sizeof(u64)); | 3142 | ALIGN(secctx_sz, sizeof(u64)); |
3162 | char *kptr = t->buffer->data + buf_offset; | ||
3163 | 3143 | ||
3164 | t->security_ctx = (uintptr_t)kptr + | 3144 | t->security_ctx = (uintptr_t)t->buffer->data + buf_offset; |
3165 | binder_alloc_get_user_buffer_offset(&target_proc->alloc); | ||
3166 | binder_alloc_copy_to_buffer(&target_proc->alloc, | 3145 | binder_alloc_copy_to_buffer(&target_proc->alloc, |
3167 | t->buffer, buf_offset, | 3146 | t->buffer, buf_offset, |
3168 | secctx, secctx_sz); | 3147 | secctx, secctx_sz); |
@@ -3380,9 +3359,7 @@ static void binder_transaction(struct binder_proc *proc, | |||
3380 | goto err_copy_data_failed; | 3359 | goto err_copy_data_failed; |
3381 | } | 3360 | } |
3382 | /* Fixup buffer pointer to target proc address space */ | 3361 | /* Fixup buffer pointer to target proc address space */ |
3383 | bp->buffer = (uintptr_t)sg_bufp + | 3362 | bp->buffer = (uintptr_t)sg_bufp; |
3384 | binder_alloc_get_user_buffer_offset( | ||
3385 | &target_proc->alloc); | ||
3386 | sg_bufp += ALIGN(bp->length, sizeof(u64)); | 3363 | sg_bufp += ALIGN(bp->length, sizeof(u64)); |
3387 | 3364 | ||
3388 | ret = binder_fixup_parent(t, thread, bp, | 3365 | ret = binder_fixup_parent(t, thread, bp, |
@@ -4474,9 +4451,7 @@ retry: | |||
4474 | } | 4451 | } |
4475 | trd->data_size = t->buffer->data_size; | 4452 | trd->data_size = t->buffer->data_size; |
4476 | trd->offsets_size = t->buffer->offsets_size; | 4453 | trd->offsets_size = t->buffer->offsets_size; |
4477 | trd->data.ptr.buffer = (binder_uintptr_t) | 4454 | trd->data.ptr.buffer = (uintptr_t)t->buffer->data; |
4478 | ((uintptr_t)t->buffer->data + | ||
4479 | binder_alloc_get_user_buffer_offset(&proc->alloc)); | ||
4480 | trd->data.ptr.offsets = trd->data.ptr.buffer + | 4455 | trd->data.ptr.offsets = trd->data.ptr.buffer + |
4481 | ALIGN(t->buffer->data_size, | 4456 | ALIGN(t->buffer->data_size, |
4482 | sizeof(void *)); | 4457 | sizeof(void *)); |
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index d4cbe4b3947a..0e7f0aa967c3 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c | |||
@@ -138,17 +138,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( | |||
138 | { | 138 | { |
139 | struct rb_node *n = alloc->allocated_buffers.rb_node; | 139 | struct rb_node *n = alloc->allocated_buffers.rb_node; |
140 | struct binder_buffer *buffer; | 140 | struct binder_buffer *buffer; |
141 | void *kern_ptr; | 141 | void *uptr; |
142 | 142 | ||
143 | kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset); | 143 | uptr = (void *)user_ptr; |
144 | 144 | ||
145 | while (n) { | 145 | while (n) { |
146 | buffer = rb_entry(n, struct binder_buffer, rb_node); | 146 | buffer = rb_entry(n, struct binder_buffer, rb_node); |
147 | BUG_ON(buffer->free); | 147 | BUG_ON(buffer->free); |
148 | 148 | ||
149 | if (kern_ptr < buffer->data) | 149 | if (uptr < buffer->data) |
150 | n = n->rb_left; | 150 | n = n->rb_left; |
151 | else if (kern_ptr > buffer->data) | 151 | else if (uptr > buffer->data) |
152 | n = n->rb_right; | 152 | n = n->rb_right; |
153 | else { | 153 | else { |
154 | /* | 154 | /* |
@@ -265,8 +265,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, | |||
265 | page->alloc = alloc; | 265 | page->alloc = alloc; |
266 | INIT_LIST_HEAD(&page->lru); | 266 | INIT_LIST_HEAD(&page->lru); |
267 | 267 | ||
268 | user_page_addr = | 268 | user_page_addr = (uintptr_t)page_addr; |
269 | (uintptr_t)page_addr + alloc->user_buffer_offset; | ||
270 | ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); | 269 | ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); |
271 | if (ret) { | 270 | if (ret) { |
272 | pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", | 271 | pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", |
@@ -694,7 +693,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, | |||
694 | } | 693 | } |
695 | 694 | ||
696 | alloc->buffer = (void *)vma->vm_start; | 695 | alloc->buffer = (void *)vma->vm_start; |
697 | alloc->user_buffer_offset = 0; | ||
698 | mutex_unlock(&binder_alloc_mmap_lock); | 696 | mutex_unlock(&binder_alloc_mmap_lock); |
699 | 697 | ||
700 | alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, | 698 | alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, |
@@ -941,9 +939,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
941 | if (vma) { | 939 | if (vma) { |
942 | trace_binder_unmap_user_start(alloc, index); | 940 | trace_binder_unmap_user_start(alloc, index); |
943 | 941 | ||
944 | zap_page_range(vma, | 942 | zap_page_range(vma, page_addr, PAGE_SIZE); |
945 | page_addr + alloc->user_buffer_offset, | ||
946 | PAGE_SIZE); | ||
947 | 943 | ||
948 | trace_binder_unmap_user_end(alloc, index); | 944 | trace_binder_unmap_user_end(alloc, index); |
949 | 945 | ||
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 9d682b9d6c24..1026e9fb20db 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h | |||
@@ -82,7 +82,6 @@ struct binder_lru_page { | |||
82 | * (invariant after init) | 82 | * (invariant after init) |
83 | * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap) | 83 | * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap) |
84 | * @buffer: base of per-proc address space mapped via mmap | 84 | * @buffer: base of per-proc address space mapped via mmap |
85 | * @user_buffer_offset: offset between user and kernel VAs for buffer | ||
86 | * @buffers: list of all buffers for this proc | 85 | * @buffers: list of all buffers for this proc |
87 | * @free_buffers: rb tree of buffers available for allocation | 86 | * @free_buffers: rb tree of buffers available for allocation |
88 | * sorted by size | 87 | * sorted by size |
@@ -104,7 +103,6 @@ struct binder_alloc { | |||
104 | struct vm_area_struct *vma; | 103 | struct vm_area_struct *vma; |
105 | struct mm_struct *vma_vm_mm; | 104 | struct mm_struct *vma_vm_mm; |
106 | void *buffer; | 105 | void *buffer; |
107 | ptrdiff_t user_buffer_offset; | ||
108 | struct list_head buffers; | 106 | struct list_head buffers; |
109 | struct rb_root free_buffers; | 107 | struct rb_root free_buffers; |
110 | struct rb_root allocated_buffers; | 108 | struct rb_root allocated_buffers; |
@@ -163,27 +161,6 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc) | |||
163 | return free_async_space; | 161 | return free_async_space; |
164 | } | 162 | } |
165 | 163 | ||
166 | /** | ||
167 | * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs | ||
168 | * @alloc: binder_alloc for this proc | ||
169 | * | ||
170 | * Return: the offset between kernel and user-space addresses to use for | ||
171 | * virtual address conversion | ||
172 | */ | ||
173 | static inline ptrdiff_t | ||
174 | binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc) | ||
175 | { | ||
176 | /* | ||
177 | * user_buffer_offset is constant if vma is set and | ||
178 | * undefined if vma is not set. It is possible to | ||
179 | * get here with !alloc->vma if the target process | ||
180 | * is dying while a transaction is being initiated. | ||
181 | * Returning the old value is ok in this case and | ||
182 | * the transaction will fail. | ||
183 | */ | ||
184 | return alloc->user_buffer_offset; | ||
185 | } | ||
186 | |||
187 | unsigned long | 164 | unsigned long |
188 | binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, | 165 | binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, |
189 | struct binder_buffer *buffer, | 166 | struct binder_buffer *buffer, |