author | Todd Kjos <tkjos@android.com> | 2019-02-08 13:35:20 -0500
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2019-02-12 04:43:57 -0500
commit | bde4a19fc04f5f46298c86b1acb7a4af1d5f138d (patch)
tree | 1062d8eaaf1dbfc760c295387390de1f4022e64c /drivers/android/binder_alloc.c
parent | c41358a5f5217abd7c051e8d42397e5b80f3b3ed (diff)
binder: use userspace pointer as base of buffer space
Now that alloc->buffer points to the userspace vm_area,
rename buffer->data to buffer->user_data and rename
local pointers that hold user addresses. Also use the
"__user" tag to annotate all user pointers so sparse
can flag cases where user pointer values are copied to
kernel pointers. Refactor code to use offsets instead
of user pointers.
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
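For readers less familiar with the "__user" annotation the message refers to: under sparse (run with "make C=2"), pointers tagged __user live in a separate address space, so silently assigning one to a plain kernel pointer is reported. A minimal illustrative sketch follows; it is not binder code, and the demo_* names are invented for this example:

	/* Illustrative only -- not part of this patch. Check with "make C=2". */
	#include <linux/uaccess.h>	/* copy_from_user() */

	static int demo_read_from_user(void __user *ubuf, void *kbuf, size_t len)
	{
	#if 0	/* sparse flags this: the initializer crosses address spaces */
		void *kptr = ubuf;
	#endif
		/* Correct pattern: keep the __user tag and copy explicitly. */
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;
		return 0;
	}

The same concern explains the explicit (void __user *) casts in the hunks below, for example when alloc->buffer is derived from vma->vm_start in binder_alloc_mmap_handler.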
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r-- | drivers/android/binder_alloc.c | 87
1 file changed, 45 insertions, 42 deletions
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 0e7f0aa967c3..000dd4d145ba 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -69,9 +69,8 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
 				       struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
-		return (u8 *)alloc->buffer +
-			alloc->buffer_size - (u8 *)buffer->data;
-	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+		return alloc->buffer + alloc->buffer_size - buffer->user_data;
+	return binder_buffer_next(buffer)->user_data - buffer->user_data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -121,9 +120,9 @@ static void binder_insert_allocated_buffer_locked(
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (new_buffer->data < buffer->data)
+		if (new_buffer->user_data < buffer->user_data)
 			p = &parent->rb_left;
-		else if (new_buffer->data > buffer->data)
+		else if (new_buffer->user_data > buffer->user_data)
 			p = &parent->rb_right;
 		else
 			BUG();
@@ -138,17 +137,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	void *uptr;
+	void __user *uptr;
 
-	uptr = (void *)user_ptr;
+	uptr = (void __user *)user_ptr;
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (uptr < buffer->data)
+		if (uptr < buffer->user_data)
 			n = n->rb_left;
-		else if (uptr > buffer->data)
+		else if (uptr > buffer->user_data)
 			n = n->rb_right;
 		else {
 			/*
@@ -188,9 +187,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 }
 
 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-				    void *start, void *end)
+				    void __user *start, void __user *end)
 {
-	void *page_addr;
+	void __user *page_addr;
 	unsigned long user_page_addr;
 	struct binder_lru_page *page;
 	struct vm_area_struct *vma = NULL;
@@ -357,8 +356,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	struct binder_buffer *buffer;
 	size_t buffer_size;
 	struct rb_node *best_fit = NULL;
-	void *has_page_addr;
-	void *end_page_addr;
+	void __user *has_page_addr;
+	void __user *end_page_addr;
 	size_t size, data_offsets_size;
 	int ret;
 
@@ -456,15 +455,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		      "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 		      alloc->pid, size, buffer, buffer_size);
 
-	has_page_addr =
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+	has_page_addr = (void __user *)
+		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
 	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	ret = binder_update_page_range(alloc, 1,
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
+	ret = binder_update_page_range(alloc, 1, (void __user *)
+		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -477,7 +476,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 			   __func__, alloc->pid);
 		goto err_alloc_buf_struct_failed;
 	}
-	new_buffer->data = (u8 *)buffer->data + size;
+	new_buffer->user_data = (u8 __user *)buffer->user_data + size;
 	list_add(&new_buffer->entry, &buffer->entry);
 	new_buffer->free = 1;
 	binder_insert_free_buffer(alloc, new_buffer);
@@ -503,8 +502,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	return buffer;
 
 err_alloc_buf_struct_failed:
-	binder_update_page_range(alloc, 0,
-				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+	binder_update_page_range(alloc, 0, (void __user *)
+				 PAGE_ALIGN((uintptr_t)buffer->user_data),
 				 end_page_addr);
 	return ERR_PTR(-ENOMEM);
 }
@@ -539,14 +538,15 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	return buffer;
 }
 
-static void *buffer_start_page(struct binder_buffer *buffer)
+static void __user *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
 }
 
-static void *prev_buffer_end_page(struct binder_buffer *buffer)
+static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+	return (void __user *)
+		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
@@ -561,7 +561,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %pK share page with %pK\n",
-				   alloc->pid, buffer->data, prev->data);
+				   alloc->pid, buffer->user_data,
+				   prev->user_data);
 	}
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
@@ -571,23 +572,24 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 					   "%d: merge free, buffer %pK share page with %pK\n",
 					   alloc->pid,
-					   buffer->data,
-					   next->data);
+					   buffer->user_data,
+					   next->user_data);
 		}
 	}
 
-	if (PAGE_ALIGNED(buffer->data)) {
+	if (PAGE_ALIGNED(buffer->user_data)) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer start %pK is page aligned\n",
-				   alloc->pid, buffer->data);
+				   alloc->pid, buffer->user_data);
 		to_free = false;
 	}
 
 	if (to_free) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
-				   alloc->pid, buffer->data,
-				   prev->data, next ? next->data : NULL);
+				   alloc->pid, buffer->user_data,
+				   prev->user_data,
+				   next ? next->user_data : NULL);
 		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
 					 buffer_start_page(buffer) + PAGE_SIZE);
 	}
@@ -613,8 +615,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON(buffer->data < alloc->buffer);
-	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->user_data < alloc->buffer);
+	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -625,8 +627,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	}
 
 	binder_update_page_range(alloc, 0,
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
+		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
+		(void __user *)(((uintptr_t)
+			  buffer->user_data + buffer_size) & PAGE_MASK));
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -692,7 +695,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_already_mapped;
 	}
 
-	alloc->buffer = (void *)vma->vm_start;
+	alloc->buffer = (void __user *)vma->vm_start;
 	mutex_unlock(&binder_alloc_mmap_lock);
 
 	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
@@ -712,7 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_alloc_buf_struct_failed;
 	}
 
-	buffer->data = alloc->buffer;
+	buffer->user_data = alloc->buffer;
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
@@ -773,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		int i;
 
 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-			void *page_addr;
+			void __user *page_addr;
 			bool on_lru;
 
 			if (!alloc->pages[i].page_ptr)
@@ -804,7 +807,7 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix,
 				struct binder_buffer *buffer)
 {
 	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
-		   prefix, buffer->debug_id, buffer->data,
+		   prefix, buffer->debug_id, buffer->user_data,
 		   buffer->data_size, buffer->offsets_size,
 		   buffer->extra_buffers_size,
 		   buffer->transaction ? "active" : "delivered");
@@ -1056,7 +1059,7 @@ static inline bool check_buffer(struct binder_alloc *alloc,
  * @pgoffp: address to copy final page offset to
  *
  * Lookup the struct page corresponding to the address
- * at @buffer_offset into @buffer->data. If @pgoffp is not
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
  * NULL, the byte-offset into the page is written there.
  *
  * The caller is responsible to ensure that the offset points
@@ -1073,7 +1076,7 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
 					  pgoff_t *pgoffp)
 {
 	binder_size_t buffer_space_offset = buffer_offset +
-		(buffer->data - alloc->buffer);
+		(buffer->user_data - alloc->buffer);
 	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
 	size_t index = buffer_space_offset >> PAGE_SHIFT;
 	struct binder_lru_page *lru_page;