about summary refs log tree commit diff stats
path: root/drivers/android/binder.c
diff options
context:
space:
mode:
author    Martijn Coenen <maco@android.com>  2017-08-31 04:04:18 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-09-01 03:20:12 -0400
commit 1b77e9dcc3da9359f5936a7a4a0b5b6585c5e37e (patch)
tree   6cb0abfb5ca3a144a8b9a1e6b80d096c8d8f54f3 /drivers/android/binder.c
parent 8ef4665aa129a14f3733efc651c53a3c6c47b500 (diff)
ANDROID: binder: remove proc waitqueue
Removes the process waitqueue, so that threads can only wait on the thread waitqueue. Whenever there is process work to do, pick a thread and wake it up. Having the caller pick a thread is helpful for things like priority inheritance. This also fixes an issue with using epoll(), since we no longer have to block on different waitqueues. Signed-off-by: Martijn Coenen <maco@android.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/android/binder.c')
-rw-r--r-- drivers/android/binder.c | 255
1 file changed, 181 insertions(+), 74 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 56b380292cc5..55a44c0b3b20 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -28,10 +28,10 @@
28 * binder_node_lock() and binder_node_unlock() are 28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel 29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists 30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->nodes) and all todo lists associated 31 * (proc->threads, proc->waiting_threads, proc->nodes)
32 * with the binder_proc (proc->todo, thread->todo, 32 * and all todo lists associated with the binder_proc
33 * proc->delivered_death and node->async_todo), as well as 33 * (proc->todo, thread->todo, proc->delivered_death and
34 * thread->transaction_stack 34 * node->async_todo), as well as thread->transaction_stack
35 * binder_inner_proc_lock() and binder_inner_proc_unlock() 35 * binder_inner_proc_lock() and binder_inner_proc_unlock()
36 * are used to acq/rel 36 * are used to acq/rel
37 * 37 *
@@ -475,6 +475,8 @@ enum binder_deferred_state {
475 * (protected by @outer_lock) 475 * (protected by @outer_lock)
476 * @refs_by_node: rbtree of refs ordered by ref->node 476 * @refs_by_node: rbtree of refs ordered by ref->node
477 * (protected by @outer_lock) 477 * (protected by @outer_lock)
478 * @waiting_threads: threads currently waiting for proc work
479 * (protected by @inner_lock)
478 * @pid PID of group_leader of process 480 * @pid PID of group_leader of process
479 * (invariant after initialized) 481 * (invariant after initialized)
480 * @tsk task_struct for group_leader of process 482 * @tsk task_struct for group_leader of process
@@ -504,8 +506,6 @@ enum binder_deferred_state {
504 * (protected by @inner_lock) 506 * (protected by @inner_lock)
505 * @requested_threads_started: number binder threads started 507 * @requested_threads_started: number binder threads started
506 * (protected by @inner_lock) 508 * (protected by @inner_lock)
507 * @ready_threads: number of threads waiting for proc work
508 * (protected by @inner_lock)
509 * @tmp_ref: temporary reference to indicate proc is in use 509 * @tmp_ref: temporary reference to indicate proc is in use
510 * (protected by @inner_lock) 510 * (protected by @inner_lock)
511 * @default_priority: default scheduler priority 511 * @default_priority: default scheduler priority
@@ -526,6 +526,7 @@ struct binder_proc {
526 struct rb_root nodes; 526 struct rb_root nodes;
527 struct rb_root refs_by_desc; 527 struct rb_root refs_by_desc;
528 struct rb_root refs_by_node; 528 struct rb_root refs_by_node;
529 struct list_head waiting_threads;
529 int pid; 530 int pid;
530 struct task_struct *tsk; 531 struct task_struct *tsk;
531 struct files_struct *files; 532 struct files_struct *files;
@@ -540,7 +541,6 @@ struct binder_proc {
540 int max_threads; 541 int max_threads;
541 int requested_threads; 542 int requested_threads;
542 int requested_threads_started; 543 int requested_threads_started;
543 int ready_threads;
544 int tmp_ref; 544 int tmp_ref;
545 long default_priority; 545 long default_priority;
546 struct dentry *debugfs_entry; 546 struct dentry *debugfs_entry;
@@ -556,6 +556,7 @@ enum {
556 BINDER_LOOPER_STATE_EXITED = 0x04, 556 BINDER_LOOPER_STATE_EXITED = 0x04,
557 BINDER_LOOPER_STATE_INVALID = 0x08, 557 BINDER_LOOPER_STATE_INVALID = 0x08,
558 BINDER_LOOPER_STATE_WAITING = 0x10, 558 BINDER_LOOPER_STATE_WAITING = 0x10,
559 BINDER_LOOPER_STATE_POLL = 0x20,
559}; 560};
560 561
561/** 562/**
@@ -564,6 +565,8 @@ enum {
564 * (invariant after initialization) 565 * (invariant after initialization)
565 * @rb_node: element for proc->threads rbtree 566 * @rb_node: element for proc->threads rbtree
566 * (protected by @proc->inner_lock) 567 * (protected by @proc->inner_lock)
568 * @waiting_thread_node: element for @proc->waiting_threads list
569 * (protected by @proc->inner_lock)
567 * @pid: PID for this thread 570 * @pid: PID for this thread
568 * (invariant after initialization) 571 * (invariant after initialization)
569 * @looper: bitmap of looping state 572 * @looper: bitmap of looping state
@@ -593,6 +596,7 @@ enum {
593struct binder_thread { 596struct binder_thread {
594 struct binder_proc *proc; 597 struct binder_proc *proc;
595 struct rb_node rb_node; 598 struct rb_node rb_node;
599 struct list_head waiting_thread_node;
596 int pid; 600 int pid;
597 int looper; /* only modified by this thread */ 601 int looper; /* only modified by this thread */
598 bool looper_need_return; /* can be written by other thread */ 602 bool looper_need_return; /* can be written by other thread */
@@ -920,6 +924,86 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
920 return retval; 924 return retval;
921} 925}
922 926
927static bool binder_has_work_ilocked(struct binder_thread *thread,
928 bool do_proc_work)
929{
930 return !binder_worklist_empty_ilocked(&thread->todo) ||
931 thread->looper_need_return ||
932 (do_proc_work &&
933 !binder_worklist_empty_ilocked(&thread->proc->todo));
934}
935
936static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
937{
938 bool has_work;
939
940 binder_inner_proc_lock(thread->proc);
941 has_work = binder_has_work_ilocked(thread, do_proc_work);
942 binder_inner_proc_unlock(thread->proc);
943
944 return has_work;
945}
946
947static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
948{
949 return !thread->transaction_stack &&
950 binder_worklist_empty_ilocked(&thread->todo) &&
951 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
952 BINDER_LOOPER_STATE_REGISTERED));
953}
954
955static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
956 bool sync)
957{
958 struct rb_node *n;
959 struct binder_thread *thread;
960
961 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
962 thread = rb_entry(n, struct binder_thread, rb_node);
963 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
964 binder_available_for_proc_work_ilocked(thread)) {
965 if (sync)
966 wake_up_interruptible_sync(&thread->wait);
967 else
968 wake_up_interruptible(&thread->wait);
969 }
970 }
971}
972
973static void binder_wakeup_proc_ilocked(struct binder_proc *proc, bool sync)
974{
975 struct binder_thread *thread;
976
977 BUG_ON(!spin_is_locked(&proc->inner_lock));
978 thread = list_first_entry_or_null(&proc->waiting_threads,
979 struct binder_thread,
980 waiting_thread_node);
981
982 if (thread) {
983 list_del_init(&thread->waiting_thread_node);
984 if (sync)
985 wake_up_interruptible_sync(&thread->wait);
986 else
987 wake_up_interruptible(&thread->wait);
988 return;
989 }
990
991 /* Didn't find a thread waiting for proc work; this can happen
992 * in two scenarios:
993 * 1. All threads are busy handling transactions
994 * In that case, one of those threads should call back into
995 * the kernel driver soon and pick up this work.
996 * 2. Threads are using the (e)poll interface, in which case
997 * they may be blocked on the waitqueue without having been
998 * added to waiting_threads. For this case, we just iterate
999 * over all threads not handling transaction work, and
1000 * wake them all up. We wake all because we don't know whether
1001 * a thread that called into (e)poll is handling non-binder
1002 * work currently.
1003 */
1004 binder_wakeup_poll_threads_ilocked(proc, sync);
1005}
1006
923static void binder_set_nice(long nice) 1007static void binder_set_nice(long nice)
924{ 1008{
925 long min_nice; 1009 long min_nice;
@@ -1138,7 +1222,7 @@ static bool binder_dec_node_nilocked(struct binder_node *node,
1138 if (proc && (node->has_strong_ref || node->has_weak_ref)) { 1222 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1139 if (list_empty(&node->work.entry)) { 1223 if (list_empty(&node->work.entry)) {
1140 binder_enqueue_work_ilocked(&node->work, &proc->todo); 1224 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1141 wake_up_interruptible(&node->proc->wait); 1225 binder_wakeup_proc_ilocked(proc, false);
1142 } 1226 }
1143 } else { 1227 } else {
1144 if (hlist_empty(&node->refs) && !node->local_strong_refs && 1228 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
@@ -2399,7 +2483,6 @@ static void binder_transaction(struct binder_proc *proc,
2399 struct binder_thread *target_thread = NULL; 2483 struct binder_thread *target_thread = NULL;
2400 struct binder_node *target_node = NULL; 2484 struct binder_node *target_node = NULL;
2401 struct list_head *target_list; 2485 struct list_head *target_list;
2402 wait_queue_head_t *target_wait;
2403 struct binder_transaction *in_reply_to = NULL; 2486 struct binder_transaction *in_reply_to = NULL;
2404 struct binder_transaction_log_entry *e; 2487 struct binder_transaction_log_entry *e;
2405 uint32_t return_error = 0; 2488 uint32_t return_error = 0;
@@ -2409,6 +2492,7 @@ static void binder_transaction(struct binder_proc *proc,
2409 binder_size_t last_fixup_min_off = 0; 2492 binder_size_t last_fixup_min_off = 0;
2410 struct binder_context *context = proc->context; 2493 struct binder_context *context = proc->context;
2411 int t_debug_id = atomic_inc_return(&binder_last_id); 2494 int t_debug_id = atomic_inc_return(&binder_last_id);
2495 bool wakeup_for_proc_work = false;
2412 2496
2413 e = binder_transaction_log_add(&binder_transaction_log); 2497 e = binder_transaction_log_add(&binder_transaction_log);
2414 e->debug_id = t_debug_id; 2498 e->debug_id = t_debug_id;
@@ -2572,10 +2656,9 @@ static void binder_transaction(struct binder_proc *proc,
2572 if (target_thread) { 2656 if (target_thread) {
2573 e->to_thread = target_thread->pid; 2657 e->to_thread = target_thread->pid;
2574 target_list = &target_thread->todo; 2658 target_list = &target_thread->todo;
2575 target_wait = &target_thread->wait;
2576 } else { 2659 } else {
2577 target_list = &target_proc->todo; 2660 target_list = &target_proc->todo;
2578 target_wait = &target_proc->wait; 2661 wakeup_for_proc_work = true;
2579 } 2662 }
2580 e->to_proc = target_proc->pid; 2663 e->to_proc = target_proc->pid;
2581 2664
@@ -2882,7 +2965,7 @@ static void binder_transaction(struct binder_proc *proc,
2882 binder_node_lock(target_node); 2965 binder_node_lock(target_node);
2883 if (target_node->has_async_transaction) { 2966 if (target_node->has_async_transaction) {
2884 target_list = &target_node->async_todo; 2967 target_list = &target_node->async_todo;
2885 target_wait = NULL; 2968 wakeup_for_proc_work = false;
2886 } else 2969 } else
2887 target_node->has_async_transaction = 1; 2970 target_node->has_async_transaction = 1;
2888 /* 2971 /*
@@ -2901,11 +2984,13 @@ static void binder_transaction(struct binder_proc *proc,
2901 binder_inner_proc_unlock(target_proc); 2984 binder_inner_proc_unlock(target_proc);
2902 binder_node_unlock(target_node); 2985 binder_node_unlock(target_node);
2903 } 2986 }
2904 if (target_wait) { 2987 if (target_thread) {
2905 if (reply || !(tr->flags & TF_ONE_WAY)) 2988 wake_up_interruptible_sync(&target_thread->wait);
2906 wake_up_interruptible_sync(target_wait); 2989 } else if (wakeup_for_proc_work) {
2907 else 2990 binder_inner_proc_lock(target_proc);
2908 wake_up_interruptible(target_wait); 2991 binder_wakeup_proc_ilocked(target_proc,
2992 !(tr->flags & TF_ONE_WAY));
2993 binder_inner_proc_unlock(target_proc);
2909 } 2994 }
2910 if (target_thread) 2995 if (target_thread)
2911 binder_thread_dec_tmpref(target_thread); 2996 binder_thread_dec_tmpref(target_thread);
@@ -3345,12 +3430,14 @@ static int binder_thread_write(struct binder_proc *proc,
3345 &ref->death->work, 3430 &ref->death->work,
3346 &thread->todo); 3431 &thread->todo);
3347 else { 3432 else {
3348 binder_enqueue_work( 3433 binder_inner_proc_lock(proc);
3349 proc, 3434 binder_enqueue_work_ilocked(
3350 &ref->death->work, 3435 &ref->death->work,
3351 &proc->todo); 3436 &proc->todo);
3352 wake_up_interruptible( 3437 binder_wakeup_proc_ilocked(
3353 &proc->wait); 3438 proc,
3439 false);
3440 binder_inner_proc_unlock(proc);
3354 } 3441 }
3355 } 3442 }
3356 } else { 3443 } else {
@@ -3385,8 +3472,9 @@ static int binder_thread_write(struct binder_proc *proc,
3385 binder_enqueue_work_ilocked( 3472 binder_enqueue_work_ilocked(
3386 &death->work, 3473 &death->work,
3387 &proc->todo); 3474 &proc->todo);
3388 wake_up_interruptible( 3475 binder_wakeup_proc_ilocked(
3389 &proc->wait); 3476 proc,
3477 false);
3390 } 3478 }
3391 } else { 3479 } else {
3392 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 3480 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
@@ -3441,7 +3529,8 @@ static int binder_thread_write(struct binder_proc *proc,
3441 binder_enqueue_work_ilocked( 3529 binder_enqueue_work_ilocked(
3442 &death->work, 3530 &death->work,
3443 &proc->todo); 3531 &proc->todo);
3444 wake_up_interruptible(&proc->wait); 3532 binder_wakeup_proc_ilocked(
3533 proc, false);
3445 } 3534 }
3446 } 3535 }
3447 binder_inner_proc_unlock(proc); 3536 binder_inner_proc_unlock(proc);
@@ -3468,13 +3557,6 @@ static void binder_stat_br(struct binder_proc *proc,
3468 } 3557 }
3469} 3558}
3470 3559
3471static int binder_has_proc_work(struct binder_proc *proc,
3472 struct binder_thread *thread)
3473{
3474 return !binder_worklist_empty(proc, &proc->todo) ||
3475 thread->looper_need_return;
3476}
3477
3478static int binder_has_thread_work(struct binder_thread *thread) 3560static int binder_has_thread_work(struct binder_thread *thread)
3479{ 3561{
3480 return !binder_worklist_empty(thread->proc, &thread->todo) || 3562 return !binder_worklist_empty(thread->proc, &thread->todo) ||
@@ -3512,6 +3594,38 @@ static int binder_put_node_cmd(struct binder_proc *proc,
3512 return 0; 3594 return 0;
3513} 3595}
3514 3596
3597static int binder_wait_for_work(struct binder_thread *thread,
3598 bool do_proc_work)
3599{
3600 DEFINE_WAIT(wait);
3601 struct binder_proc *proc = thread->proc;
3602 int ret = 0;
3603
3604 freezer_do_not_count();
3605 binder_inner_proc_lock(proc);
3606 for (;;) {
3607 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3608 if (binder_has_work_ilocked(thread, do_proc_work))
3609 break;
3610 if (do_proc_work)
3611 list_add(&thread->waiting_thread_node,
3612 &proc->waiting_threads);
3613 binder_inner_proc_unlock(proc);
3614 schedule();
3615 binder_inner_proc_lock(proc);
3616 list_del_init(&thread->waiting_thread_node);
3617 if (signal_pending(current)) {
3618 ret = -ERESTARTSYS;
3619 break;
3620 }
3621 }
3622 finish_wait(&thread->wait, &wait);
3623 binder_inner_proc_unlock(proc);
3624 freezer_count();
3625
3626 return ret;
3627}
3628
3515static int binder_thread_read(struct binder_proc *proc, 3629static int binder_thread_read(struct binder_proc *proc,
3516 struct binder_thread *thread, 3630 struct binder_thread *thread,
3517 binder_uintptr_t binder_buffer, size_t size, 3631 binder_uintptr_t binder_buffer, size_t size,
@@ -3532,10 +3646,7 @@ static int binder_thread_read(struct binder_proc *proc,
3532 3646
3533retry: 3647retry:
3534 binder_inner_proc_lock(proc); 3648 binder_inner_proc_lock(proc);
3535 wait_for_proc_work = thread->transaction_stack == NULL && 3649 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3536 binder_worklist_empty_ilocked(&thread->todo);
3537 if (wait_for_proc_work)
3538 proc->ready_threads++;
3539 binder_inner_proc_unlock(proc); 3650 binder_inner_proc_unlock(proc);
3540 3651
3541 thread->looper |= BINDER_LOOPER_STATE_WAITING; 3652 thread->looper |= BINDER_LOOPER_STATE_WAITING;
@@ -3552,23 +3663,15 @@ retry:
3552 binder_stop_on_user_error < 2); 3663 binder_stop_on_user_error < 2);
3553 } 3664 }
3554 binder_set_nice(proc->default_priority); 3665 binder_set_nice(proc->default_priority);
3555 if (non_block) { 3666 }
3556 if (!binder_has_proc_work(proc, thread)) 3667
3557 ret = -EAGAIN; 3668 if (non_block) {
3558 } else 3669 if (!binder_has_work(thread, wait_for_proc_work))
3559 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); 3670 ret = -EAGAIN;
3560 } else { 3671 } else {
3561 if (non_block) { 3672 ret = binder_wait_for_work(thread, wait_for_proc_work);
3562 if (!binder_has_thread_work(thread))
3563 ret = -EAGAIN;
3564 } else
3565 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
3566 } 3673 }
3567 3674
3568 binder_inner_proc_lock(proc);
3569 if (wait_for_proc_work)
3570 proc->ready_threads--;
3571 binder_inner_proc_unlock(proc);
3572 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; 3675 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3573 3676
3574 if (ret) 3677 if (ret)
@@ -3854,7 +3957,8 @@ done:
3854 3957
3855 *consumed = ptr - buffer; 3958 *consumed = ptr - buffer;
3856 binder_inner_proc_lock(proc); 3959 binder_inner_proc_lock(proc);
3857 if (proc->requested_threads + proc->ready_threads == 0 && 3960 if (proc->requested_threads == 0 &&
3961 list_empty(&thread->proc->waiting_threads) &&
3858 proc->requested_threads_started < proc->max_threads && 3962 proc->requested_threads_started < proc->max_threads &&
3859 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 3963 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3860 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ 3964 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
@@ -3965,7 +4069,7 @@ static struct binder_thread *binder_get_thread_ilocked(
3965 thread->return_error.cmd = BR_OK; 4069 thread->return_error.cmd = BR_OK;
3966 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; 4070 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
3967 thread->reply_error.cmd = BR_OK; 4071 thread->reply_error.cmd = BR_OK;
3968 4072 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
3969 return thread; 4073 return thread;
3970} 4074}
3971 4075
@@ -4078,28 +4182,24 @@ static unsigned int binder_poll(struct file *filp,
4078{ 4182{
4079 struct binder_proc *proc = filp->private_data; 4183 struct binder_proc *proc = filp->private_data;
4080 struct binder_thread *thread = NULL; 4184 struct binder_thread *thread = NULL;
4081 int wait_for_proc_work; 4185 bool wait_for_proc_work;
4082 4186
4083 thread = binder_get_thread(proc); 4187 thread = binder_get_thread(proc);
4084 4188
4085 binder_inner_proc_lock(thread->proc); 4189 binder_inner_proc_lock(thread->proc);
4086 wait_for_proc_work = thread->transaction_stack == NULL && 4190 thread->looper |= BINDER_LOOPER_STATE_POLL;
4087 binder_worklist_empty_ilocked(&thread->todo); 4191 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4192
4088 binder_inner_proc_unlock(thread->proc); 4193 binder_inner_proc_unlock(thread->proc);
4089 4194
4090 if (wait_for_proc_work) { 4195 if (binder_has_work(thread, wait_for_proc_work))
4091 if (binder_has_proc_work(proc, thread)) 4196 return POLLIN;
4092 return POLLIN; 4197
4093 poll_wait(filp, &proc->wait, wait); 4198 poll_wait(filp, &thread->wait, wait);
4094 if (binder_has_proc_work(proc, thread)) 4199
4095 return POLLIN; 4200 if (binder_has_thread_work(thread))
4096 } else { 4201 return POLLIN;
4097 if (binder_has_thread_work(thread)) 4202
4098 return POLLIN;
4099 poll_wait(filp, &thread->wait, wait);
4100 if (binder_has_thread_work(thread))
4101 return POLLIN;
4102 }
4103 return 0; 4203 return 0;
4104} 4204}
4105 4205
@@ -4146,8 +4246,10 @@ static int binder_ioctl_write_read(struct file *filp,
4146 &bwr.read_consumed, 4246 &bwr.read_consumed,
4147 filp->f_flags & O_NONBLOCK); 4247 filp->f_flags & O_NONBLOCK);
4148 trace_binder_read_done(ret); 4248 trace_binder_read_done(ret);
4149 if (!binder_worklist_empty(proc, &proc->todo)) 4249 binder_inner_proc_lock(proc);
4150 wake_up_interruptible(&proc->wait); 4250 if (!binder_worklist_empty_ilocked(&proc->todo))
4251 binder_wakeup_proc_ilocked(proc, false);
4252 binder_inner_proc_unlock(proc);
4151 if (ret < 0) { 4253 if (ret < 0) {
4152 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4254 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4153 ret = -EFAULT; 4255 ret = -EFAULT;
@@ -4389,7 +4491,6 @@ static int binder_open(struct inode *nodp, struct file *filp)
4389 get_task_struct(current->group_leader); 4491 get_task_struct(current->group_leader);
4390 proc->tsk = current->group_leader; 4492 proc->tsk = current->group_leader;
4391 INIT_LIST_HEAD(&proc->todo); 4493 INIT_LIST_HEAD(&proc->todo);
4392 init_waitqueue_head(&proc->wait);
4393 proc->default_priority = task_nice(current); 4494 proc->default_priority = task_nice(current);
4394 binder_dev = container_of(filp->private_data, struct binder_device, 4495 binder_dev = container_of(filp->private_data, struct binder_device,
4395 miscdev); 4496 miscdev);
@@ -4399,6 +4500,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
4399 binder_stats_created(BINDER_STAT_PROC); 4500 binder_stats_created(BINDER_STAT_PROC);
4400 proc->pid = current->group_leader->pid; 4501 proc->pid = current->group_leader->pid;
4401 INIT_LIST_HEAD(&proc->delivered_death); 4502 INIT_LIST_HEAD(&proc->delivered_death);
4503 INIT_LIST_HEAD(&proc->waiting_threads);
4402 filp->private_data = proc; 4504 filp->private_data = proc;
4403 4505
4404 mutex_lock(&binder_procs_lock); 4506 mutex_lock(&binder_procs_lock);
@@ -4450,7 +4552,6 @@ static void binder_deferred_flush(struct binder_proc *proc)
4450 } 4552 }
4451 } 4553 }
4452 binder_inner_proc_unlock(proc); 4554 binder_inner_proc_unlock(proc);
4453 wake_up_interruptible_all(&proc->wait);
4454 4555
4455 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 4556 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4456 "binder_flush: %d woke %d threads\n", proc->pid, 4557 "binder_flush: %d woke %d threads\n", proc->pid,
@@ -4519,7 +4620,7 @@ static int binder_node_release(struct binder_node *node, int refs)
4519 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 4620 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4520 binder_enqueue_work_ilocked(&ref->death->work, 4621 binder_enqueue_work_ilocked(&ref->death->work,
4521 &ref->proc->todo); 4622 &ref->proc->todo);
4522 wake_up_interruptible(&ref->proc->wait); 4623 binder_wakeup_proc_ilocked(ref->proc, false);
4523 binder_inner_proc_unlock(ref->proc); 4624 binder_inner_proc_unlock(ref->proc);
4524 } 4625 }
4525 4626
@@ -5007,23 +5108,29 @@ static void print_binder_proc_stats(struct seq_file *m,
5007 struct binder_proc *proc) 5108 struct binder_proc *proc)
5008{ 5109{
5009 struct binder_work *w; 5110 struct binder_work *w;
5111 struct binder_thread *thread;
5010 struct rb_node *n; 5112 struct rb_node *n;
5011 int count, strong, weak; 5113 int count, strong, weak, ready_threads;
5012 size_t free_async_space = 5114 size_t free_async_space =
5013 binder_alloc_get_free_async_space(&proc->alloc); 5115 binder_alloc_get_free_async_space(&proc->alloc);
5014 5116
5015 seq_printf(m, "proc %d\n", proc->pid); 5117 seq_printf(m, "proc %d\n", proc->pid);
5016 seq_printf(m, "context %s\n", proc->context->name); 5118 seq_printf(m, "context %s\n", proc->context->name);
5017 count = 0; 5119 count = 0;
5120 ready_threads = 0;
5018 binder_inner_proc_lock(proc); 5121 binder_inner_proc_lock(proc);
5019 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 5122 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5020 count++; 5123 count++;
5124
5125 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5126 ready_threads++;
5127
5021 seq_printf(m, " threads: %d\n", count); 5128 seq_printf(m, " threads: %d\n", count);
5022 seq_printf(m, " requested threads: %d+%d/%d\n" 5129 seq_printf(m, " requested threads: %d+%d/%d\n"
5023 " ready threads %d\n" 5130 " ready threads %d\n"
5024 " free async space %zd\n", proc->requested_threads, 5131 " free async space %zd\n", proc->requested_threads,
5025 proc->requested_threads_started, proc->max_threads, 5132 proc->requested_threads_started, proc->max_threads,
5026 proc->ready_threads, 5133 ready_threads,
5027 free_async_space); 5134 free_async_space);
5028 count = 0; 5135 count = 0;
5029 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 5136 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))