author	Todd Kjos <tkjos@android.com>	2019-02-08 13:35:19 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-02-12 04:43:57 -0500
commit	c41358a5f5217abd7c051e8d42397e5b80f3b3ed (patch)
tree	2d6de5a71627556a615ef15676a514686cc1c81b /drivers/android/binder_alloc.c
parent	880211667b203dd32724f3be224c44c0400aa0a6 (diff)
binder: remove user_buffer_offset
Remove user_buffer_offset since there is no kernel
buffer pointer anymore.
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
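For context, here is a minimal userspace sketch (not part of the patch) of the address translation being removed: previously the driver kept a kernel mapping of the buffer space and used user_buffer_offset to convert a user address into its kernel counterpart before lookups; with no kernel buffer pointer left, the user address itself is the lookup key, as the hunks below show. All names other than user_buffer_offset (fake_alloc, old_lookup_key, new_lookup_key) are hypothetical and exist only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct binder_alloc; only user_buffer_offset
 * mirrors the driver field this patch deletes. */
struct fake_alloc {
	void *buffer;                 /* base of the mapped buffer space */
	uintptr_t user_buffer_offset; /* old scheme: user addr - kernel addr */
};

/* Old scheme: translate the user pointer into the kernel mapping
 * before comparing it against stored buffer addresses. */
static void *old_lookup_key(const struct fake_alloc *a, uintptr_t user_ptr)
{
	return (void *)(user_ptr - a->user_buffer_offset);
}

/* New scheme: the user pointer is compared directly, no offset needed. */
static void *new_lookup_key(uintptr_t user_ptr)
{
	return (void *)user_ptr;
}

int main(void)
{
	struct fake_alloc a = { .buffer = (void *)0x10000000UL,
				.user_buffer_offset = 0 };
	uintptr_t user_ptr = (uintptr_t)a.buffer + 0x100;

	printf("old key: %p\n", old_lookup_key(&a, user_ptr));
	printf("new key: %p\n", new_lookup_key(user_ptr));
	return 0;
}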
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--	drivers/android/binder_alloc.c	16
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index d4cbe4b3947a..0e7f0aa967c3 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -138,17 +138,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	void *kern_ptr;
+	void *uptr;
 
-	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+	uptr = (void *)user_ptr;
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (kern_ptr < buffer->data)
+		if (uptr < buffer->data)
 			n = n->rb_left;
-		else if (kern_ptr > buffer->data)
+		else if (uptr > buffer->data)
 			n = n->rb_right;
 		else {
 			/*
@@ -265,8 +265,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		page->alloc = alloc;
 		INIT_LIST_HEAD(&page->lru);
 
-		user_page_addr =
-			(uintptr_t)page_addr + alloc->user_buffer_offset;
+		user_page_addr = (uintptr_t)page_addr;
 		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -694,7 +693,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	}
 
 	alloc->buffer = (void *)vma->vm_start;
-	alloc->user_buffer_offset = 0;
 	mutex_unlock(&binder_alloc_mmap_lock);
 
 	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
@@ -941,9 +939,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
 
-		zap_page_range(vma,
-			       page_addr + alloc->user_buffer_offset,
-			       PAGE_SIZE);
+		zap_page_range(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
 
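A follow-up note, not part of the patch: because binder_alloc_mmap_handler now stores vma->vm_start in alloc->buffer (third hunk), addresses derived from alloc->buffer are already user-space addresses, which is why vm_insert_page() and zap_page_range() can take page_addr directly. The sketch below assumes per-page addresses are computed as alloc->buffer plus a page-sized index, as in the driver's free-page path; fake_alloc and page_user_addr are hypothetical names.

#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096UL

/* Hypothetical stand-in; only the buffer field mirrors the driver,
 * which now sets it to vma->vm_start in binder_alloc_mmap_handler. */
struct fake_alloc {
	void *buffer; /* user-space base of the mapped buffer space */
};

/* With alloc->buffer holding the user-space base of the mapping, the
 * address of page `index` is itself a user address, so no
 * user_buffer_offset has to be added before handing it to
 * vm_insert_page() or zap_page_range(). */
static uintptr_t page_user_addr(const struct fake_alloc *alloc, size_t index)
{
	return (uintptr_t)alloc->buffer + index * SKETCH_PAGE_SIZE;
}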