author     Sherry Yang <sherryy@android.com>                 2017-08-23 11:46:41 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2017-08-28 10:47:17 -0400
commit     74310e06be4d74dcf67cd108366710dee5c576d5 (patch)
tree       57be21314e48f8b30bdd9831eb6a0bb252808bc9 /drivers/android/binder_alloc.c
parent     4175e2b46fd4b9021ef81f18f1be9474b2f45d4a (diff)
android: binder: Move buffer out of area shared with user space
The binder driver allocates buffer metadata (struct binder_buffer) in a region that is mapped into user space, and that metadata contains kernel pointers. This patch allocates the buffer metadata on the kernel heap instead, where it is not mapped into user space, and uses a pointer field to refer to the mapped data.

Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
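To make the layout change concrete, here is a minimal sketch of the metadata structure before and after the patch. The field list is abbreviated and the "_before"/"_after" type names are invented for illustration; see the driver's binder_alloc.h for the real struct binder_buffer definition.

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

/* Illustrative sketch only -- abbreviated, not the driver's full definition. */

/* Before: the metadata header sits at the start of each buffer inside the
 * mmap'd region, so its kernel pointers are visible to user space and the
 * payload follows the header directly. */
struct binder_buffer_before {
        struct list_head entry;   /* kernel list pointers, mapped into user space */
        struct rb_node rb_node;   /* kernel rb-tree pointers, mapped into user space */
        unsigned free:1;
        /* ... other bookkeeping fields ... */
        uint8_t data[0];          /* payload begins right after the header */
};

/* After: the metadata header is kzalloc'd on the kernel heap and never mapped
 * into user space; only a pointer refers to the payload in the mapped area. */
struct binder_buffer_after {
        struct list_head entry;
        struct rb_node rb_node;
        unsigned free:1;
        /* ... other bookkeeping fields ... */
        void *data;               /* points at the payload in the user-mapped area */
};

This is why the diff below replaces pointer arithmetic on the struct itself ((void *)buffer) with arithmetic on buffer->data, and why binder_alloc_mmap_handler() and binder_alloc_new_buf_locked() now kzalloc() the header separately and kfree() it when the buffer is deleted.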
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--   drivers/android/binder_alloc.c   144
1 file changed, 84 insertions(+), 60 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index f15af2b55a62..e96659215f25 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -62,9 +62,9 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                        struct binder_buffer *buffer)
 {
         if (list_is_last(&buffer->entry, &alloc->buffers))
-                return alloc->buffer +
-                       alloc->buffer_size - (void *)buffer->data;
-        return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
+                return (u8 *)alloc->buffer +
+                       alloc->buffer_size - (u8 *)buffer->data;
+        return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -114,9 +114,9 @@ static void binder_insert_allocated_buffer_locked(
                 buffer = rb_entry(parent, struct binder_buffer, rb_node);
                 BUG_ON(buffer->free);
 
-                if (new_buffer < buffer)
+                if (new_buffer->data < buffer->data)
                         p = &parent->rb_left;
-                else if (new_buffer > buffer)
+                else if (new_buffer->data > buffer->data)
                         p = &parent->rb_right;
                 else
                         BUG();
@@ -131,18 +131,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
         struct rb_node *n = alloc->allocated_buffers.rb_node;
         struct binder_buffer *buffer;
-        struct binder_buffer *kern_ptr;
+        void *kern_ptr;
 
-        kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
-                - offsetof(struct binder_buffer, data));
+        kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
 
         while (n) {
                 buffer = rb_entry(n, struct binder_buffer, rb_node);
                 BUG_ON(buffer->free);
 
-                if (kern_ptr < buffer)
+                if (kern_ptr < buffer->data)
                         n = n->rb_left;
-                else if (kern_ptr > buffer)
+                else if (kern_ptr > buffer->data)
                         n = n->rb_right;
                 else {
                         /*
@@ -330,6 +329,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                 return ERR_PTR(-ENOSPC);
         }
 
+        /* Pad 0-size buffers so they get assigned unique addresses */
+        size = max(size, sizeof(void *));
+
         while (n) {
                 buffer = rb_entry(n, struct binder_buffer, rb_node);
                 BUG_ON(!buffer->free);
@@ -389,14 +391,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 
         has_page_addr =
                 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-        if (n == NULL) {
-                if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-                        buffer_size = size; /* no room for other buffers */
-                else
-                        buffer_size = size + sizeof(struct binder_buffer);
-        }
+        WARN_ON(n && buffer_size != size);
         end_page_addr =
-                (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+                (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
         if (end_page_addr > has_page_addr)
                 end_page_addr = has_page_addr;
         ret = binder_update_page_range(alloc, 1,
@@ -404,17 +401,25 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
         if (ret)
                 return ERR_PTR(ret);
 
-        rb_erase(best_fit, &alloc->free_buffers);
-        buffer->free = 0;
-        buffer->free_in_progress = 0;
-        binder_insert_allocated_buffer_locked(alloc, buffer);
         if (buffer_size != size) {
-                struct binder_buffer *new_buffer = (void *)buffer->data + size;
+                struct binder_buffer *new_buffer;
 
+                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+                if (!new_buffer) {
+                        pr_err("%s: %d failed to alloc new buffer struct\n",
+                               __func__, alloc->pid);
+                        goto err_alloc_buf_struct_failed;
+                }
+                new_buffer->data = (u8 *)buffer->data + size;
                 list_add(&new_buffer->entry, &buffer->entry);
                 new_buffer->free = 1;
                 binder_insert_free_buffer(alloc, new_buffer);
         }
+
+        rb_erase(best_fit, &alloc->free_buffers);
+        buffer->free = 0;
+        buffer->free_in_progress = 0;
+        binder_insert_allocated_buffer_locked(alloc, buffer);
         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                            "%d: binder_alloc_buf size %zd got %pK\n",
                            alloc->pid, size, buffer);
@@ -429,6 +434,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                               alloc->pid, size, alloc->free_async_space);
         }
         return buffer;
+
+err_alloc_buf_struct_failed:
+        binder_update_page_range(alloc, 0,
+                                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+                                 end_page_addr, NULL);
+        return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -463,56 +474,59 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
-        return (void *)((uintptr_t)buffer & PAGE_MASK);
+        return (void *)((uintptr_t)buffer->data & PAGE_MASK);
 }
 
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-        return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+        return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
 {
         struct binder_buffer *prev, *next = NULL;
-        int free_page_end = 1;
-        int free_page_start = 1;
-
+        bool to_free = true;
         BUG_ON(alloc->buffers.next == &buffer->entry);
         prev = binder_buffer_prev(buffer);
         BUG_ON(!prev->free);
-        if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-                free_page_start = 0;
-                if (buffer_end_page(prev) == buffer_end_page(buffer))
-                        free_page_end = 0;
+        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+                to_free = false;
                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                    "%d: merge free, buffer %pK share page with %pK\n",
-                                   alloc->pid, buffer, prev);
+                                   alloc->pid, buffer->data, prev->data);
         }
 
         if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                 next = binder_buffer_next(buffer);
-                if (buffer_start_page(next) == buffer_end_page(buffer)) {
-                        free_page_end = 0;
-                        if (buffer_start_page(next) ==
-                            buffer_start_page(buffer))
-                                free_page_start = 0;
+                if (buffer_start_page(next) == buffer_start_page(buffer)) {
+                        to_free = false;
                         binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                            "%d: merge free, buffer %pK share page with %pK\n",
-                                           alloc->pid, buffer, prev);
+                                           alloc->pid,
+                                           buffer->data,
+                                           next->data);
                 }
         }
-        list_del(&buffer->entry);
-        if (free_page_start || free_page_end) {
+
+        if (PAGE_ALIGNED(buffer->data)) {
+                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                   "%d: merge free, buffer start %pK is page aligned\n",
+                                   alloc->pid, buffer->data);
+                to_free = false;
+        }
+
+        if (to_free) {
                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                   "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
-                                   alloc->pid, buffer, free_page_start ? "" : " end",
-                                   free_page_end ? "" : " start", prev, next);
-                binder_update_page_range(alloc, 0, free_page_start ?
-                        buffer_start_page(buffer) : buffer_end_page(buffer),
-                        (free_page_end ? buffer_end_page(buffer) :
-                        buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+                                   alloc->pid, buffer->data,
+                                   prev->data, next->data);
+                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+                                         buffer_start_page(buffer) + PAGE_SIZE,
+                                         NULL);
         }
+        list_del(&buffer->entry);
+        kfree(buffer);
 }
 
 static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -533,8 +547,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
         BUG_ON(buffer->free);
         BUG_ON(size > buffer_size);
         BUG_ON(buffer->transaction != NULL);
-        BUG_ON((void *)buffer < alloc->buffer);
-        BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+        BUG_ON(buffer->data < alloc->buffer);
+        BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
 
         if (buffer->async_transaction) {
                 alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -646,13 +660,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
         }
         alloc->buffer_size = vma->vm_end - vma->vm_start;
 
-        if (binder_update_page_range(alloc, 1, alloc->buffer,
-                                     alloc->buffer + PAGE_SIZE, vma)) {
+        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+        if (!buffer) {
                 ret = -ENOMEM;
-                failure_string = "alloc small buf";
-                goto err_alloc_small_buf_failed;
+                failure_string = "alloc buffer struct";
+                goto err_alloc_buf_struct_failed;
         }
-        buffer = alloc->buffer;
+
+        buffer->data = alloc->buffer;
         INIT_LIST_HEAD(&alloc->buffers);
         list_add(&buffer->entry, &alloc->buffers);
         buffer->free = 1;
@@ -664,7 +679,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 
         return 0;
 
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
         kfree(alloc->pages);
         alloc->pages = NULL;
 err_alloc_pages_failed:
@@ -684,14 +699,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 {
         struct rb_node *n;
         int buffers, page_count;
+        struct binder_buffer *buffer;
 
         BUG_ON(alloc->vma);
 
         buffers = 0;
         mutex_lock(&alloc->mutex);
         while ((n = rb_first(&alloc->allocated_buffers))) {
-                struct binder_buffer *buffer;
-
                 buffer = rb_entry(n, struct binder_buffer, rb_node);
 
                 /* Transaction should already have been freed */
@@ -701,6 +715,16 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                 buffers++;
         }
 
+        while (!list_empty(&alloc->buffers)) {
+                buffer = list_first_entry(&alloc->buffers,
+                                          struct binder_buffer, entry);
+                WARN_ON(!buffer->free);
+
+                list_del(&buffer->entry);
+                WARN_ON_ONCE(!list_empty(&alloc->buffers));
+                kfree(buffer);
+        }
+
         page_count = 0;
         if (alloc->pages) {
                 int i;