author    Todd Kjos <tkjos@android.com>  2017-06-29 15:01:38 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-07-17 08:47:28 -0400
commit    fdfb4a99b6ab8c393db19e3b92968b74ca2757b0 (patch)
tree      f214ebc813f7654ad19400011ab5a5778a6c0dba
parent    00b40d613352c623aaae88a44e5ded7c912909d7 (diff)
binder: separate binder allocator structure from binder proc
The binder allocator is logically separate from the rest of the binder
driver. Separate the data structures to prepare for splitting the
allocator into its own file with its own locking.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
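The change itself is mechanical: every allocator field formerly inlined
in struct binder_proc is now reached through the embedded alloc member.
A minimal plain-C sketch of the before/after shape (the field subset and
the helper name are illustrative only, not part of the patch):

#include <stddef.h>

/* Illustrative model of the refactor: allocator bookkeeping moves out
 * of binder_proc into an embedded binder_alloc, so accesses change
 * from proc->free_async_space to proc->alloc.free_async_space.
 */
struct binder_alloc_model {
	void *buffer;                 /* base of the mapped space */
	ptrdiff_t user_buffer_offset; /* user VA minus kernel VA */
	size_t buffer_size;           /* size of the mapped space */
	size_t free_async_space;      /* VA left for async buffers */
};

struct binder_proc_model {
	int pid;
	struct binder_alloc_model alloc; /* was: fields inlined here */
};

static size_t remaining_async_space(const struct binder_proc_model *proc)
{
	return proc->alloc.free_async_space; /* was proc->free_async_space */
}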
-rw-r--r--	drivers/android/binder.c	212
-rw-r--r--	drivers/android/binder_trace.h	2
2 files changed, 129 insertions(+), 85 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f7665c31feca..1097d056ea6b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -319,6 +319,41 @@ enum binder_deferred_state {
 	BINDER_DEFERRED_RELEASE = 0x04,
 };
 
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma:                vm_area_struct passed to mmap_handler
+ *                      (invariant after mmap)
+ * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
+ * @buffer:             base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers:            list of all buffers for this proc
+ * @free_buffers:       rb tree of buffers available for allocation
+ *                      sorted by size
+ * @allocated_buffers:  rb tree of allocated buffers sorted by address
+ * @free_async_space:   VA space available for async buffers. This is
+ *                      initialized at mmap time to 1/2 the full VA space
+ * @pages:              array of physical page addresses for each page of
+ *                      mmap'd space
+ * @buffer_size:        size of address space (could be less than requested)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers.
+ */
+struct binder_alloc {
+	struct vm_area_struct *vma;
+	struct mm_struct *vma_vm_mm;
+	void *buffer;
+	ptrdiff_t user_buffer_offset;
+	struct list_head buffers;
+	struct rb_root free_buffers;
+	struct rb_root allocated_buffers;
+	size_t free_async_space;
+	struct page **pages;
+	size_t buffer_size;
+};
+
 struct binder_proc {
 	struct hlist_node proc_node;
 	struct rb_root threads;
@@ -326,23 +361,11 @@ struct binder_proc {
 	struct rb_root refs_by_desc;
 	struct rb_root refs_by_node;
 	int pid;
-	struct vm_area_struct *vma;
-	struct mm_struct *vma_vm_mm;
 	struct task_struct *tsk;
 	struct files_struct *files;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
-	void *buffer;
-	ptrdiff_t user_buffer_offset;
 
-	struct list_head buffers;
-	struct rb_root free_buffers;
-	struct rb_root allocated_buffers;
-	size_t free_async_space;
-
-	struct page **pages;
-	size_t buffer_size;
-	uint32_t buffer_free;
 	struct list_head todo;
 	wait_queue_head_t wait;
 	struct binder_stats stats;
@@ -353,6 +376,7 @@ struct binder_proc {
 	int ready_threads;
 	long default_priority;
 	struct dentry *debugfs_entry;
+	struct binder_alloc alloc;
 	struct binder_context *context;
 };
 
@@ -485,8 +509,10 @@ static void binder_set_nice(long nice)
 static size_t binder_buffer_size(struct binder_proc *proc,
 				 struct binder_buffer *buffer)
 {
-	if (list_is_last(&buffer->entry, &proc->buffers))
-		return proc->buffer + proc->buffer_size - (void *)buffer->data;
+	if (list_is_last(&buffer->entry, &proc->alloc.buffers))
+		return proc->alloc.buffer +
+		       proc->alloc.buffer_size -
+		       (void *)buffer->data;
 	return (size_t)list_entry(buffer->entry.next,
 			  struct binder_buffer, entry) - (size_t)buffer->data;
 }
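For context on the binder_buffer_size() hunk above: buffers are packed
back to back in the mapped region, so a buffer's usable size is the
distance from its data[] to the next buffer's header, or to the end of
the region for the last buffer. A simplified stand-alone model of that
arithmetic (types and names are illustrative, not the driver's):

#include <stddef.h>

/* Model: each header is immediately followed by its payload, and the
 * next header starts where the payload ends.
 */
struct buf_hdr {
	struct buf_hdr *next; /* NULL for the last buffer in the region */
	char data[];
};

static size_t buf_size(void *region_end, struct buf_hdr *b)
{
	if (!b->next) /* last buffer runs to the end of the region */
		return (size_t)((char *)region_end - b->data);
	return (size_t)((char *)b->next - b->data);
}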
@@ -494,7 +520,7 @@ static size_t binder_buffer_size(struct binder_proc *proc,
 static void binder_insert_free_buffer(struct binder_proc *proc,
 				      struct binder_buffer *new_buffer)
 {
-	struct rb_node **p = &proc->free_buffers.rb_node;
+	struct rb_node **p = &proc->alloc.free_buffers.rb_node;
 	struct rb_node *parent = NULL;
 	struct binder_buffer *buffer;
 	size_t buffer_size;
@@ -521,13 +547,13 @@ static void binder_insert_free_buffer(struct binder_proc *proc,
 		p = &parent->rb_right;
 	}
 	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+	rb_insert_color(&new_buffer->rb_node, &proc->alloc.free_buffers);
 }
 
 static void binder_insert_allocated_buffer(struct binder_proc *proc,
 					   struct binder_buffer *new_buffer)
 {
-	struct rb_node **p = &proc->allocated_buffers.rb_node;
+	struct rb_node **p = &proc->alloc.allocated_buffers.rb_node;
 	struct rb_node *parent = NULL;
 	struct binder_buffer *buffer;
 
@@ -546,18 +572,19 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
 		BUG();
 	}
 	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+	rb_insert_color(&new_buffer->rb_node, &proc->alloc.allocated_buffers);
 }
 
 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
 						  uintptr_t user_ptr)
 {
-	struct rb_node *n = proc->allocated_buffers.rb_node;
+	struct rb_node *n = proc->alloc.allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
 	struct binder_buffer *kern_ptr;
 
-	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
+	kern_ptr = (struct binder_buffer *)
+		(user_ptr - proc->alloc.user_buffer_offset -
+		 offsetof(struct binder_buffer, data));
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
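The binder_buffer_lookup() hunk above leans on the invariant
user VA = kernel VA + user_buffer_offset: a user pointer into a
buffer's data is converted back to its kernel-side header by
subtracting the offset and offsetof(..., data). A small stand-alone
model of that conversion (simplified struct, illustrative only):

#include <stddef.h>
#include <stdint.h>

struct buffer_model {
	int free;
	char data[]; /* user_ptr points here, in the user mapping */
};

/* Recover the kernel-side header from a userspace data pointer. */
static struct buffer_model *lookup_hdr(uintptr_t user_ptr,
				       ptrdiff_t user_buffer_offset)
{
	return (struct buffer_model *)(user_ptr - user_buffer_offset -
				       offsetof(struct buffer_model, data));
}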
@@ -598,8 +625,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 
 	if (mm) {
 		down_write(&mm->mmap_sem);
-		vma = proc->vma;
-		if (vma && mm != proc->vma_vm_mm) {
+		vma = proc->alloc.vma;
+		if (vma && mm != proc->alloc.vma_vm_mm) {
 			pr_err("%d: vma mm and task mm mismatch\n",
 				proc->pid);
 			vma = NULL;
@@ -618,7 +645,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
 
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+		page = &proc->alloc.pages[
+				(page_addr - proc->alloc.buffer) / PAGE_SIZE];
 
 		BUG_ON(*page);
 		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
@@ -637,7 +665,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 			goto err_map_kernel_failed;
 		}
 		user_page_addr =
-			(uintptr_t)page_addr + proc->user_buffer_offset;
+			(uintptr_t)page_addr + proc->alloc.user_buffer_offset;
 		ret = vm_insert_page(vma, user_page_addr, page[0]);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -655,10 +683,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 free_range:
 	for (page_addr = end - PAGE_SIZE; page_addr >= start;
 	     page_addr -= PAGE_SIZE) {
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+		page = &proc->alloc.pages[
+				(page_addr - proc->alloc.buffer) / PAGE_SIZE];
 		if (vma)
 			zap_page_range(vma, (uintptr_t)page_addr +
-				proc->user_buffer_offset, PAGE_SIZE);
+				proc->alloc.user_buffer_offset, PAGE_SIZE);
 err_vm_insert_page_failed:
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 err_map_kernel_failed:
@@ -681,7 +710,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 					      size_t extra_buffers_size,
 					      int is_async)
 {
-	struct rb_node *n = proc->free_buffers.rb_node;
+	struct rb_node *n = proc->alloc.free_buffers.rb_node;
 	struct binder_buffer *buffer;
 	size_t buffer_size;
 	struct rb_node *best_fit = NULL;
@@ -689,7 +718,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 	void *end_page_addr;
 	size_t size, data_offsets_size;
 
-	if (proc->vma == NULL) {
+	if (proc->alloc.vma == NULL) {
 		pr_err("%d: binder_alloc_buf, no vma\n",
 		       proc->pid);
 		return NULL;
@@ -710,7 +739,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 		return NULL;
 	}
 	if (is_async &&
-	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
+	    proc->alloc.free_async_space <
+			size + sizeof(struct binder_buffer)) {
 		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
 			      proc->pid, size);
@@ -762,7 +792,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 		(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
 		return NULL;
 
-	rb_erase(best_fit, &proc->free_buffers);
+	rb_erase(best_fit, &proc->alloc.free_buffers);
 	buffer->free = 0;
 	binder_insert_allocated_buffer(proc, buffer);
 	if (buffer_size != size) {
@@ -780,10 +810,11 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->async_transaction = is_async;
 	if (is_async) {
-		proc->free_async_space -= size + sizeof(struct binder_buffer);
+		proc->alloc.free_async_space -=
+			size + sizeof(struct binder_buffer);
 		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 			     "%d: binder_alloc_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->free_async_space);
+			      proc->pid, size, proc->alloc.free_async_space);
 	}
 
 	return buffer;
@@ -806,7 +837,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
 	int free_page_end = 1;
 	int free_page_start = 1;
 
-	BUG_ON(proc->buffers.next == &buffer->entry);
+	BUG_ON(proc->alloc.buffers.next == &buffer->entry);
 	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
 	BUG_ON(!prev->free);
 	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
@@ -818,7 +849,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
 			   proc->pid, buffer, prev);
 	}
 
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+	if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
 		next = list_entry(buffer->entry.next,
 				  struct binder_buffer, entry);
 		if (buffer_start_page(next) == buffer_end_page(buffer)) {
@@ -862,39 +893,40 @@ static void binder_free_buf(struct binder_proc *proc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < proc->buffer);
-	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+	BUG_ON((void *)buffer < proc->alloc.buffer);
+	BUG_ON((void *)buffer > proc->alloc.buffer + proc->alloc.buffer_size);
 
 	if (buffer->async_transaction) {
-		proc->free_async_space += size + sizeof(struct binder_buffer);
+		proc->alloc.free_async_space +=
+			size + sizeof(struct binder_buffer);
 
 		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 			     "%d: binder_free_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->free_async_space);
+			      proc->pid, size, proc->alloc.free_async_space);
 	}
 
 	binder_update_page_range(proc, 0,
 		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
 		NULL);
-	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+	rb_erase(&buffer->rb_node, &proc->alloc.allocated_buffers);
 	buffer->free = 1;
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+	if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
 		struct binder_buffer *next = list_entry(buffer->entry.next,
 						struct binder_buffer, entry);
 
 		if (next->free) {
-			rb_erase(&next->rb_node, &proc->free_buffers);
+			rb_erase(&next->rb_node, &proc->alloc.free_buffers);
 			binder_delete_free_buffer(proc, next);
 		}
 	}
-	if (proc->buffers.next != &buffer->entry) {
+	if (proc->alloc.buffers.next != &buffer->entry) {
 		struct binder_buffer *prev = list_entry(buffer->entry.prev,
 						struct binder_buffer, entry);
 
 		if (prev->free) {
 			binder_delete_free_buffer(proc, buffer);
-			rb_erase(&prev->rb_node, &proc->free_buffers);
+			rb_erase(&prev->rb_node, &proc->alloc.free_buffers);
 			buffer = prev;
 		}
 	}
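Worth noting in the binder_free_buf() hunk above: a freed buffer is
coalesced with free neighbours so the free tree never holds two
adjacent free chunks, and when the previous entry is free the merged
buffer starts at prev. A compact illustrative model of the merge on a
plain doubly linked list (the driver also updates its rb-trees, which
this sketch omits):

/* Merge a just-freed chunk with any free neighbours. */
struct chunk {
	struct chunk *prev, *next;
	int free;
};

static struct chunk *coalesce(struct chunk *c)
{
	if (c->next && c->next->free) { /* absorb the next chunk */
		c->next = c->next->next;
		if (c->next)
			c->next->prev = c;
	}
	if (c->prev && c->prev->free) { /* fold into the previous chunk */
		c->prev->next = c->next;
		if (c->next)
			c->next->prev = c->prev;
		c = c->prev; /* merged chunk starts at prev */
	}
	c->free = 1;
	return c;
}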
@@ -1532,7 +1564,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			 * back to kernel address space to access it
 			 */
 			parent_buffer = parent->buffer -
-				proc->user_buffer_offset;
+				proc->alloc.user_buffer_offset;
 
 			fd_buf_size = sizeof(u32) * fda->num_fds;
 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1750,7 +1782,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 	 * Since the parent was already fixed up, convert it
 	 * back to the kernel address space to access it
 	 */
-	parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+	parent_buffer = parent->buffer - target_proc->alloc.user_buffer_offset;
 	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1818,7 +1850,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
 		return -EINVAL;
 	}
 	parent_buffer = (u8 *)(parent->buffer -
-			       target_proc->user_buffer_offset);
+			       target_proc->alloc.user_buffer_offset);
 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
 
 	return 0;
@@ -2158,7 +2190,7 @@ static void binder_transaction(struct binder_proc *proc,
 			}
 			/* Fixup buffer pointer to target proc address space */
 			bp->buffer = (uintptr_t)sg_bufp +
-				target_proc->user_buffer_offset;
+				target_proc->alloc.user_buffer_offset;
 			sg_bufp += ALIGN(bp->length, sizeof(u64));
 
 			ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2920,7 +2952,7 @@ retry:
 		tr.offsets_size = t->buffer->offsets_size;
 		tr.data.ptr.buffer = (binder_uintptr_t)(
 					(uintptr_t)t->buffer->data +
-					proc->user_buffer_offset);
+					proc->alloc.user_buffer_offset);
 		tr.data.ptr.offsets = tr.data.ptr.buffer +
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));
@@ -3338,8 +3370,8 @@ static void binder_vma_close(struct vm_area_struct *vma)
 		     proc->pid, vma->vm_start, vma->vm_end,
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
-	proc->vma = NULL;
-	proc->vma_vm_mm = NULL;
+	proc->alloc.vma = NULL;
+	proc->alloc.vma_vm_mm = NULL;
 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
@@ -3382,7 +3414,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
 
 	mutex_lock(&binder_mmap_lock);
-	if (proc->buffer) {
+	if (proc->alloc.buffer) {
 		ret = -EBUSY;
 		failure_string = "already mapped";
 		goto err_already_mapped;
@@ -3394,56 +3426,66 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		failure_string = "get_vm_area";
 		goto err_get_vm_area_failed;
 	}
-	proc->buffer = area->addr;
-	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+	proc->alloc.buffer = area->addr;
+	proc->alloc.user_buffer_offset =
+		vma->vm_start - (uintptr_t)proc->alloc.buffer;
 	mutex_unlock(&binder_mmap_lock);
 
 #ifdef CONFIG_CPU_CACHE_VIPT
 	if (cache_is_vipt_aliasing()) {
-		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
-			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+		while (CACHE_COLOUR(
+				(vma->vm_start ^ (uint32_t)proc->alloc.buffer))) {
+			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+				__func__,
+				proc->pid, vma->vm_start,
+				vma->vm_end, proc->alloc.buffer);
 			vma->vm_start += PAGE_SIZE;
 		}
 	}
 #endif
-	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
-	if (proc->pages == NULL) {
+	proc->alloc.pages =
+		kzalloc(sizeof(proc->alloc.pages[0]) *
+				((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+			GFP_KERNEL);
+	if (proc->alloc.pages == NULL) {
 		ret = -ENOMEM;
 		failure_string = "alloc page array";
 		goto err_alloc_pages_failed;
 	}
-	proc->buffer_size = vma->vm_end - vma->vm_start;
+	proc->alloc.buffer_size = vma->vm_end - vma->vm_start;
 
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;
 
-	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+	if (binder_update_page_range(proc, 1, proc->alloc.buffer,
+				     proc->alloc.buffer + PAGE_SIZE, vma)) {
 		ret = -ENOMEM;
 		failure_string = "alloc small buf";
 		goto err_alloc_small_buf_failed;
 	}
-	buffer = proc->buffer;
-	INIT_LIST_HEAD(&proc->buffers);
-	list_add(&buffer->entry, &proc->buffers);
+	buffer = proc->alloc.buffer;
+	INIT_LIST_HEAD(&proc->alloc.buffers);
+	list_add(&buffer->entry, &proc->alloc.buffers);
 	buffer->free = 1;
 	binder_insert_free_buffer(proc, buffer);
-	proc->free_async_space = proc->buffer_size / 2;
+	proc->alloc.free_async_space = proc->alloc.buffer_size / 2;
 	barrier();
 	proc->files = get_files_struct(current);
-	proc->vma = vma;
-	proc->vma_vm_mm = vma->vm_mm;
+	proc->alloc.vma = vma;
+	proc->alloc.vma_vm_mm = vma->vm_mm;
 
-	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
-		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+	/*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
+	 *	proc->pid, vma->vm_start, vma->vm_end, proc->alloc.buffer);
+	 */
 	return 0;
 
 err_alloc_small_buf_failed:
-	kfree(proc->pages);
-	proc->pages = NULL;
+	kfree(proc->alloc.pages);
+	proc->alloc.pages = NULL;
 err_alloc_pages_failed:
 	mutex_lock(&binder_mmap_lock);
-	vfree(proc->buffer);
-	proc->buffer = NULL;
+	vfree(proc->alloc.buffer);
+	proc->alloc.buffer = NULL;
 err_get_vm_area_failed:
 err_already_mapped:
 	mutex_unlock(&binder_mmap_lock);
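The binder_mmap() hunk above does all allocator setup in one place:
map the region, record user_buffer_offset, allocate the per-page
pointer array, seed the buffer list with a single free buffer spanning
the whole region, and reserve half the space for async transactions.
A hedged user-space model of that setup order (malloc/calloc stand in
for get_vm_area() and kzalloc(); not the driver's code):

#include <stdlib.h>

struct alloc_model {
	void *buffer;            /* base of the "mapped" region */
	size_t buffer_size;      /* region size */
	size_t free_async_space; /* starts at half the region */
	void **pages;            /* one slot per page, zeroed */
};

static int alloc_model_init(struct alloc_model *a, size_t region_size,
			    size_t page_size)
{
	a->buffer = malloc(region_size); /* stands in for get_vm_area() */
	if (!a->buffer)
		return -1;
	a->pages = calloc(region_size / page_size, sizeof(*a->pages));
	if (!a->pages) {
		free(a->buffer);
		return -1;
	}
	a->buffer_size = region_size;
	a->free_async_space = region_size / 2; /* half kept for async */
	return 0;
}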
@@ -3595,7 +3637,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 	int threads, nodes, incoming_refs, outgoing_refs, buffers,
 		active_transactions, page_count;
 
-	BUG_ON(proc->vma);
+	BUG_ON(proc->alloc.vma);
 	BUG_ON(proc->files);
 
 	hlist_del(&proc->proc_node);
@@ -3642,7 +3684,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 	binder_release_work(&proc->delivered_death);
 
 	buffers = 0;
-	while ((n = rb_first(&proc->allocated_buffers))) {
+	while ((n = rb_first(&proc->alloc.allocated_buffers))) {
 		struct binder_buffer *buffer;
 
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -3663,25 +3705,25 @@ static void binder_deferred_release(struct binder_proc *proc)
 	binder_stats_deleted(BINDER_STAT_PROC);
 
 	page_count = 0;
-	if (proc->pages) {
+	if (proc->alloc.pages) {
 		int i;
 
-		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+		for (i = 0; i < proc->alloc.buffer_size / PAGE_SIZE; i++) {
 			void *page_addr;
 
-			if (!proc->pages[i])
+			if (!proc->alloc.pages[i])
 				continue;
 
-			page_addr = proc->buffer + i * PAGE_SIZE;
+			page_addr = proc->alloc.buffer + i * PAGE_SIZE;
 			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				     "%s: %d: page %d at %p not freed\n",
 				     __func__, proc->pid, i, page_addr);
 			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-			__free_page(proc->pages[i]);
+			__free_page(proc->alloc.pages[i]);
 			page_count++;
 		}
-		kfree(proc->pages);
-		vfree(proc->buffer);
+		kfree(proc->alloc.pages);
+		vfree(proc->alloc.buffer);
 	}
 
 	put_task_struct(proc->tsk);
@@ -3911,7 +3953,8 @@ static void print_binder_proc(struct seq_file *m,
 		print_binder_ref(m, rb_entry(n, struct binder_ref,
 					     rb_node_desc));
 	}
-	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->alloc.allocated_buffers);
+	     n != NULL; n = rb_next(n))
 		print_binder_buffer(m, "  buffer",
 				    rb_entry(n, struct binder_buffer, rb_node));
 	list_for_each_entry(w, &proc->todo, entry)
@@ -4028,7 +4071,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 			"  ready threads %d\n"
 			"  free async space %zd\n", proc->requested_threads,
 			proc->requested_threads_started, proc->max_threads,
-			proc->ready_threads, proc->free_async_space);
+			proc->ready_threads, proc->alloc.free_async_space);
 	count = 0;
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
 		count++;
@@ -4046,7 +4089,8 @@ static void print_binder_proc_stats(struct seq_file *m,
 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
 
 	count = 0;
-	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+	for (n = rb_first(&proc->alloc.allocated_buffers);
+	     n != NULL; n = rb_next(n))
 		count++;
 	seq_printf(m, "  buffers: %d\n", count);
 
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3dc8369..c835f09656c1 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -280,7 +280,7 @@ TRACE_EVENT(binder_update_page_range,
 	TP_fast_assign(
 		__entry->proc = proc->pid;
 		__entry->allocate = allocate;
-		__entry->offset = start - proc->buffer;
+		__entry->offset = start - proc->alloc.buffer;
 		__entry->size = end - start;
 	),
 	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",