author		Todd Kjos <tkjos@android.com>	2017-06-29 15:02:08 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-07-17 08:48:24 -0400
commit		2c1838dc6817dd28cf24ba0c57cc8693be9bbfc5 (patch)
tree		4c231a1b44cd8808867d112dced8c650ea0d906f /drivers/android/binder.c
parent		b3e6861283790d78f298f2a1bc3ef5fd81b381f4 (diff)

binder: protect binder_ref with outer lock

Use proc->outer_lock to protect the binder_ref structure. The outer
lock allows functions operating on the binder_ref to do nested
acquires of node and inner locks as necessary to attach refs to nodes
atomically. Binder refs must never be accessed without holding the
outer lock.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
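[Editor's note] The lock nesting the commit message describes (outer lock first, then node lock, then inner lock, released in reverse) can be summarized in a standalone sketch. The following is a userspace model using pthread mutexes, not the driver's actual spinlock code; the struct layouts and the attach_ref_model() helper are illustrative stand-ins, named only to mirror the driver's concepts.

/* Userspace model of the binder lock nesting order (sketch, not kernel code).
 * Order: proc->outer_lock -> node->lock -> proc->inner_lock.
 * Acquiring in any other order risks deadlock.
 */
#include <pthread.h>
#include <stdio.h>

struct binder_proc_model {
	pthread_mutex_t outer_lock;	/* protects refs_by_desc/refs_by_node */
	pthread_mutex_t inner_lock;	/* protects todo lists, etc. */
};

struct binder_node_model {
	pthread_mutex_t lock;
	struct binder_proc_model *proc;
};

static struct binder_proc_model proc_m = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static struct binder_node_model node_m = { PTHREAD_MUTEX_INITIALIZER, &proc_m };

/* Attach a ref to a node "atomically": nested acquires under the outer lock. */
static void attach_ref_model(struct binder_proc_model *proc,
			     struct binder_node_model *node)
{
	pthread_mutex_lock(&proc->outer_lock);	/* 1: outer */
	pthread_mutex_lock(&node->lock);	/* 2: node */
	pthread_mutex_lock(&proc->inner_lock);	/* 3: inner */
	/* ... link the ref into proc->refs_by_* and node->refs here ... */
	pthread_mutex_unlock(&proc->inner_lock);
	pthread_mutex_unlock(&node->lock);
	pthread_mutex_unlock(&proc->outer_lock);
}

int main(void)
{
	attach_ref_model(&proc_m, &node_m);
	puts("lock order honored: outer -> node -> inner");
	return 0;
}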
Diffstat (limited to 'drivers/android/binder.c')
-rw-r--r--	drivers/android/binder.c	133
1 file changed, 83 insertions(+), 50 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 704540ea3e12..f07f0d488aa4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -475,7 +475,9 @@ enum binder_deferred_state {
  *                        this proc ordered by node->ptr
  *                        (protected by @inner_lock)
  * @refs_by_desc:         rbtree of refs ordered by ref->desc
+ *                        (protected by @outer_lock)
  * @refs_by_node:         rbtree of refs ordered by ref->node
+ *                        (protected by @outer_lock)
  * @pid                   PID of group_leader of process
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
@@ -1269,8 +1271,8 @@ static void binder_put_node(struct binder_node *node)
 	binder_dec_node_tmpref(node);
 }
 
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-					 u32 desc, bool need_strong_ref)
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+						 u32 desc, bool need_strong_ref)
 {
 	struct rb_node *n = proc->refs_by_desc.rb_node;
 	struct binder_ref *ref;
@@ -1293,7 +1295,7 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
 }
 
 /**
- * binder_get_ref_for_node() - get the ref associated with given node
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
  * @proc:	binder_proc that owns the ref
  * @node:	binder_node of target
  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
@@ -1310,9 +1312,10 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
  *		new_ref. new_ref must be kfree'd by the caller in
  *		this case.
  */
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
-						  struct binder_node *node,
-						  struct binder_ref *new_ref)
+static struct binder_ref *binder_get_ref_for_node_olocked(
+					struct binder_proc *proc,
+					struct binder_node *node,
+					struct binder_ref *new_ref)
 {
 	struct binder_context *context = proc->context;
 	struct rb_node **p = &proc->refs_by_node.rb_node;
@@ -1375,7 +1378,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
 	return new_ref;
 }
 
-static void binder_cleanup_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
 {
 	bool delete_node = false;
 
@@ -1418,17 +1421,17 @@ static void binder_cleanup_ref(struct binder_ref *ref)
 }
 
 /**
- * binder_inc_ref() - increment the ref for given handle
+ * binder_inc_ref_olocked() - increment the ref for given handle
  * @ref:         ref to be incremented
  * @strong:      if true, strong increment, else weak
  * @target_list: list to queue node work on
  *
- * Increment the ref.
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
  *
  * Return: 0, if successful, else errno
  */
-static int binder_inc_ref(struct binder_ref *ref, int strong,
-			  struct list_head *target_list)
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+				  struct list_head *target_list)
 {
 	int ret;
 
@@ -1457,12 +1460,9 @@ static int binder_inc_ref(struct binder_ref *ref, int strong,
  *
  * Decrement the ref.
  *
- * TODO: kfree is avoided here since an upcoming patch
- * will put this under a lock.
- *
  * Return: true if ref is cleaned up and ready to be freed
  */
-static bool binder_dec_ref(struct binder_ref *ref, int strong)
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
 {
 	if (strong) {
 		if (ref->data.strong == 0) {
@@ -1486,13 +1486,7 @@ static bool binder_dec_ref(struct binder_ref *ref, int strong)
 		ref->data.weak--;
 	}
 	if (ref->data.strong == 0 && ref->data.weak == 0) {
-		binder_cleanup_ref(ref);
-		/*
-		 * TODO: we could kfree(ref) here, but an upcoming
-		 * patch will call this with a lock held, so we
-		 * return an indication that the ref should be
-		 * freed.
-		 */
+		binder_cleanup_ref_olocked(ref);
 		return true;
 	}
 	return false;
@@ -1517,7 +1511,8 @@ static struct binder_node *binder_get_node_from_ref(
 	struct binder_node *node;
 	struct binder_ref *ref;
 
-	ref = binder_get_ref(proc, desc, need_strong_ref);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
 	if (!ref)
 		goto err_no_ref;
 	node = ref->node;
@@ -1528,10 +1523,12 @@ static struct binder_node *binder_get_node_from_ref(
 	binder_inc_node_tmpref(node);
 	if (rdata)
 		*rdata = ref->data;
+	binder_proc_unlock(proc);
 
 	return node;
 
 err_no_ref:
+	binder_proc_unlock(proc);
 	return NULL;
 }
 
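[Editor's note] Worth noticing in the hunk above: binder_inc_node_tmpref() runs while outer_lock is still held, so the node is pinned before the lock drops and cannot vanish between binder_proc_unlock() and the return. A minimal userspace model of that pin-then-unlock idea (hypothetical names; a plain counter stands in for the kernel's tmpref machinery):

#include <pthread.h>
#include <stddef.h>

struct obj { int tmprefs; };

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static struct obj table[4];			/* toy registry */

static struct obj *lookup_olocked(unsigned int desc)
{
	return desc < 4 ? &table[desc] : NULL;	/* caller holds 'outer' */
}

static struct obj *get_pinned(unsigned int desc)
{
	struct obj *o;

	pthread_mutex_lock(&outer);
	o = lookup_olocked(desc);
	if (o)
		o->tmprefs++;	/* pin before unlocking: survives the unlock */
	pthread_mutex_unlock(&outer);
	return o;		/* caller must later drop the temporary ref */
}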
@@ -1571,24 +1568,27 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
 	struct binder_ref *ref;
 	bool delete_ref = false;
 
-	ref = binder_get_ref(proc, desc, strong);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_olocked(proc, desc, strong);
 	if (!ref) {
 		ret = -EINVAL;
 		goto err_no_ref;
 	}
 	if (increment)
-		ret = binder_inc_ref(ref, strong, NULL);
+		ret = binder_inc_ref_olocked(ref, strong, NULL);
 	else
-		delete_ref = binder_dec_ref(ref, strong);
+		delete_ref = binder_dec_ref_olocked(ref, strong);
 
 	if (rdata)
 		*rdata = ref->data;
+	binder_proc_unlock(proc);
 
 	if (delete_ref)
 		binder_free_ref(ref);
 	return ret;
 
 err_no_ref:
+	binder_proc_unlock(proc);
 	return ret;
 }
 
@@ -1633,15 +1633,19 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
 	struct binder_ref *new_ref = NULL;
 	int ret = 0;
 
-	ref = binder_get_ref_for_node(proc, node, NULL);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 	if (!ref) {
+		binder_proc_unlock(proc);
 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 		if (!new_ref)
 			return -ENOMEM;
+		binder_proc_lock(proc);
-		ref = binder_get_ref_for_node(proc, node, new_ref);
+		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 	}
-	ret = binder_inc_ref(ref, strong, target_list);
+	ret = binder_inc_ref_olocked(ref, strong, target_list);
 	*rdata = ref->data;
+	binder_proc_unlock(proc);
 	if (new_ref && ref != new_ref)
 		/*
 		 * Another thread created the ref first so
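[Editor's note] The reshaped binder_inc_ref_for_node() above is the standard allocate-outside-the-lock pattern: kzalloc(GFP_KERNEL) may sleep, so it cannot run under the outer spinlock; the lock is dropped, the ref is allocated, and the lookup is repeated under the lock because another thread may have installed a ref in the meantime (hence the ref != new_ref check and the kfree just past this hunk). A compact userspace sketch of the same shape, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table_entry;		/* models the refs_by_node lookup */

/* Return the existing entry, or install @new_entry if none (lock held). */
static void *lookup_or_install_locked(void *new_entry)
{
	if (!table_entry)
		table_entry = new_entry;
	return table_entry;
}

static void *get_or_create(size_t size)
{
	void *entry, *new_entry = NULL;

	pthread_mutex_lock(&table_lock);
	entry = lookup_or_install_locked(NULL);
	if (!entry) {
		pthread_mutex_unlock(&table_lock); /* allocation may block */
		new_entry = calloc(1, size);
		if (!new_entry)
			return NULL;
		pthread_mutex_lock(&table_lock);
		/* re-check under the lock: another thread may have raced in */
		entry = lookup_or_install_locked(new_entry);
	}
	pthread_mutex_unlock(&table_lock);

	if (new_entry && entry != new_entry)
		free(new_entry);	/* lost the race; discard ours */
	return entry;
}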
@@ -2497,11 +2501,14 @@ static void binder_transaction(struct binder_proc *proc,
 			 * stays alive until the transaction is
 			 * done.
 			 */
-			ref = binder_get_ref(proc, tr->target.handle, true);
+			binder_proc_lock(proc);
+			ref = binder_get_ref_olocked(proc, tr->target.handle,
+						     true);
 			if (ref) {
 				binder_inc_node(ref->node, 1, 0, NULL);
 				target_node = ref->node;
 			}
+			binder_proc_unlock(proc);
 			if (target_node == NULL) {
 				binder_user_error("%d:%d got transaction to invalid handle\n",
 					proc->pid, thread->pid);
@@ -3277,7 +3284,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			uint32_t target;
 			binder_uintptr_t cookie;
 			struct binder_ref *ref;
-			struct binder_ref_death *death;
+			struct binder_ref_death *death = NULL;
 
 			if (get_user(target, (uint32_t __user *)ptr))
 				return -EFAULT;
@@ -3285,7 +3292,29 @@ static int binder_thread_write(struct binder_proc *proc,
 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
-			ref = binder_get_ref(proc, target, false);
+			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+				/*
+				 * Allocate memory for death notification
+				 * before taking lock
+				 */
+				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				if (death == NULL) {
+					WARN_ON(thread->return_error.cmd !=
+						BR_OK);
+					thread->return_error.cmd = BR_ERROR;
+					binder_enqueue_work(
+						thread->proc,
+						&thread->return_error.work,
+						&thread->todo);
+					binder_debug(
+						BINDER_DEBUG_FAILED_TRANSACTION,
+						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+						proc->pid, thread->pid);
+					break;
+				}
+			}
+			binder_proc_lock(proc);
+			ref = binder_get_ref_olocked(proc, target, false);
 			if (ref == NULL) {
 				binder_user_error("%d:%d %s invalid ref %d\n",
 						  proc->pid, thread->pid,
@@ -3293,6 +3322,8 @@ static int binder_thread_write(struct binder_proc *proc,
3293 "BC_REQUEST_DEATH_NOTIFICATION" : 3322 "BC_REQUEST_DEATH_NOTIFICATION" :
3294 "BC_CLEAR_DEATH_NOTIFICATION", 3323 "BC_CLEAR_DEATH_NOTIFICATION",
3295 target); 3324 target);
3325 binder_proc_unlock(proc);
3326 kfree(death);
3296 break; 3327 break;
3297 } 3328 }
3298 3329
@@ -3310,20 +3341,8 @@ static int binder_thread_write(struct binder_proc *proc,
 				if (ref->death) {
 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
 						proc->pid, thread->pid);
-					break;
-				}
-				death = kzalloc(sizeof(*death), GFP_KERNEL);
-				if (death == NULL) {
-					WARN_ON(thread->return_error.cmd !=
-						BR_OK);
-					thread->return_error.cmd = BR_ERROR;
-					binder_enqueue_work(
-						thread->proc,
-						&thread->return_error.work,
-						&thread->todo);
-					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
-						     proc->pid, thread->pid);
+					binder_proc_unlock(proc);
+					kfree(death);
 					break;
 				}
 				binder_stats_created(BINDER_STAT_DEATH);
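[Editor's note] Two things changed around BC_REQUEST_DEATH_NOTIFICATION in the hunks above: the binder_ref_death allocation moved ahead of binder_proc_lock(), since a sleeping GFP_KERNEL allocation is not allowed under a spinlock, and because death is now preallocated, every early exit taken under the lock must unlock and kfree(death). That is why the declaration gained = NULL: kfree(NULL) is a no-op, so the BC_CLEAR path can share the cleanup. A reduced userspace model of that error handling, with illustrative names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;

struct death_note { long cookie; };

static int target_exists(unsigned int handle)
{
	return handle != 0;	/* toy lookup: handle 0 is invalid */
}

static int request_death_notification(unsigned int handle, long cookie)
{
	struct death_note *death;

	death = calloc(1, sizeof(*death));	/* allocate before locking */
	if (!death)
		return -1;
	death->cookie = cookie;

	pthread_mutex_lock(&proc_lock);
	if (!target_exists(handle)) {
		pthread_mutex_unlock(&proc_lock); /* every exit unlocks... */
		free(death);			  /* ...and frees the prealloc */
		return -1;
	}
	/* ... attach death to the ref while still locked ... */
	pthread_mutex_unlock(&proc_lock);
	return 0;
}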
@@ -3356,6 +3375,7 @@ static int binder_thread_write(struct binder_proc *proc,
 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
 							  proc->pid, thread->pid);
 					binder_node_unlock(ref->node);
+					binder_proc_unlock(proc);
 					break;
 				}
 				death = ref->death;
@@ -3365,6 +3385,7 @@ static int binder_thread_write(struct binder_proc *proc,
 						(u64)death->cookie,
 						(u64)cookie);
 					binder_node_unlock(ref->node);
+					binder_proc_unlock(proc);
 					break;
 				}
 				ref->death = NULL;
@@ -3391,6 +3412,7 @@ static int binder_thread_write(struct binder_proc *proc,
 					binder_inner_proc_unlock(proc);
 				binder_node_unlock(ref->node);
 			}
+			binder_proc_unlock(proc);
 		} break;
 		case BC_DEAD_BINDER_DONE: {
 			struct binder_work *w;
@@ -4601,14 +4623,18 @@ static void binder_deferred_release(struct binder_proc *proc)
 	binder_inner_proc_unlock(proc);
 
 	outgoing_refs = 0;
+	binder_proc_lock(proc);
 	while ((n = rb_first(&proc->refs_by_desc))) {
 		struct binder_ref *ref;
 
 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
 		outgoing_refs++;
-		binder_cleanup_ref(ref);
+		binder_cleanup_ref_olocked(ref);
+		binder_proc_unlock(proc);
 		binder_free_ref(ref);
+		binder_proc_lock(proc);
 	}
+	binder_proc_unlock(proc);
 
 	binder_release_work(proc, &proc->todo);
 	binder_release_work(proc, &proc->delivered_death);
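[Editor's note] In binder_deferred_release() above, binder_free_ref() is kept outside the lock (matching its other call sites), and the loop re-reads rb_first() after relocking, so dropping the lock mid-iteration is safe: the walk never depends on a node it has already let go of. The same restart-from-the-head idiom in a userspace list model (illustrative types, free() standing in for binder_free_ref()):

#include <pthread.h>
#include <stdlib.h>

struct item { struct item *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *head;

static void release_all(void)
{
	pthread_mutex_lock(&list_lock);
	while (head) {
		struct item *it = head;

		head = it->next;		/* detach under the lock */
		pthread_mutex_unlock(&list_lock);
		free(it);			/* free runs unlocked */
		pthread_mutex_lock(&list_lock);	/* head re-read next pass */
	}
	pthread_mutex_unlock(&list_lock);
}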
@@ -4816,8 +4842,10 @@ static void print_binder_node_nilocked(struct seq_file *m,
 	}
 }
 
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+				     struct binder_ref *ref)
 {
+	WARN_ON(!spin_is_locked(&ref->proc->outer_lock));
 	binder_node_lock(ref->node);
 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
 		   ref->data.debug_id, ref->data.desc,
@@ -4869,11 +4897,14 @@ static void print_binder_proc(struct seq_file *m,
 		binder_put_node(last_node);
 
 	if (print_all) {
+		binder_proc_lock(proc);
 		for (n = rb_first(&proc->refs_by_desc);
 		     n != NULL;
 		     n = rb_next(n))
-			print_binder_ref(m, rb_entry(n, struct binder_ref,
-						     rb_node_desc));
+			print_binder_ref_olocked(m, rb_entry(n,
+							     struct binder_ref,
+							     rb_node_desc));
+		binder_proc_unlock(proc);
 	}
 	binder_alloc_print_allocated(m, &proc->alloc);
 	binder_inner_proc_lock(proc);
@@ -5013,6 +5044,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 	count = 0;
 	strong = 0;
 	weak = 0;
+	binder_proc_lock(proc);
 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
 						  rb_node_desc);
@@ -5020,6 +5052,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 		strong += ref->data.strong;
 		weak += ref->data.weak;
 	}
+	binder_proc_unlock(proc);
 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
 
 	count = binder_alloc_get_allocated_count(&proc->alloc);