aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartijn Coenen <maco@android.com>2017-08-31 04:04:19 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-09-01 03:20:13 -0400
commit408c68b17aea2f23236cdb49b6c060e0ded846ed (patch)
treee09d52ca9dabe777583ca37e7a10e5f6b210768e
parent1b77e9dcc3da9359f5936a7a4a0b5b6585c5e37e (diff)
ANDROID: binder: push new transactions to waiting threads.
Instead of pushing new transactions to the process waitqueue, select a thread that is waiting on proc work to handle the transaction. This will make it easier to improve priority inheritance in future patches, by setting the priority before we wake up a thread. If we can't find a waiting thread, submit the work to the proc waitqueue instead as we did previously. Signed-off-by: Martijn Coenen <maco@android.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--drivers/android/binder.c181
1 file changed, 127 insertions, 54 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 55a44c0b3b20..8f2031c52ea4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -970,7 +970,20 @@ static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
970 } 970 }
971} 971}
972 972
973static void binder_wakeup_proc_ilocked(struct binder_proc *proc, bool sync) 973/**
974 * binder_select_thread_ilocked() - selects a thread for doing proc work.
975 * @proc: process to select a thread from
976 *
977 * Note that calling this function moves the thread off the waiting_threads
978 * list, so it can only be woken up by the caller of this function, or a
979 * signal. Therefore, callers *should* always wake up the thread this function
980 * returns.
981 *
982 * Return: If there's a thread currently waiting for process work,
983 * returns that thread. Otherwise returns NULL.
984 */
985static struct binder_thread *
986binder_select_thread_ilocked(struct binder_proc *proc)
974{ 987{
975 struct binder_thread *thread; 988 struct binder_thread *thread;
976 989
@@ -979,8 +992,35 @@ static void binder_wakeup_proc_ilocked(struct binder_proc *proc, bool sync)
979 struct binder_thread, 992 struct binder_thread,
980 waiting_thread_node); 993 waiting_thread_node);
981 994
982 if (thread) { 995 if (thread)
983 list_del_init(&thread->waiting_thread_node); 996 list_del_init(&thread->waiting_thread_node);
997
998 return thread;
999}
1000
1001/**
1002 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1003 * @proc: process to wake up a thread in
1004 * @thread: specific thread to wake-up (may be NULL)
1005 * @sync: whether to do a synchronous wake-up
1006 *
1007 * This function wakes up a thread in the @proc process.
1008 * The caller may provide a specific thread to wake-up in
1009 * the @thread parameter. If @thread is NULL, this function
1010 * will wake up threads that have called poll().
1011 *
1012 * Note that for this function to work as expected, callers
1013 * should first call binder_select_thread() to find a thread
1014 * to handle the work (if they don't have a thread already),
1015 * and pass the result into the @thread parameter.
1016 */
1017static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1018 struct binder_thread *thread,
1019 bool sync)
1020{
1021 BUG_ON(!spin_is_locked(&proc->inner_lock));
1022
1023 if (thread) {
984 if (sync) 1024 if (sync)
985 wake_up_interruptible_sync(&thread->wait); 1025 wake_up_interruptible_sync(&thread->wait);
986 else 1026 else
@@ -1004,6 +1044,13 @@ static void binder_wakeup_proc_ilocked(struct binder_proc *proc, bool sync)
1004 binder_wakeup_poll_threads_ilocked(proc, sync); 1044 binder_wakeup_poll_threads_ilocked(proc, sync);
1005} 1045}
1006 1046
1047static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1048{
1049 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1050
1051 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1052}
1053
1007static void binder_set_nice(long nice) 1054static void binder_set_nice(long nice)
1008{ 1055{
1009 long min_nice; 1056 long min_nice;
@@ -1222,7 +1269,7 @@ static bool binder_dec_node_nilocked(struct binder_node *node,
1222 if (proc && (node->has_strong_ref || node->has_weak_ref)) { 1269 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1223 if (list_empty(&node->work.entry)) { 1270 if (list_empty(&node->work.entry)) {
1224 binder_enqueue_work_ilocked(&node->work, &proc->todo); 1271 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1225 binder_wakeup_proc_ilocked(proc, false); 1272 binder_wakeup_proc_ilocked(proc);
1226 } 1273 }
1227 } else { 1274 } else {
1228 if (hlist_empty(&node->refs) && !node->local_strong_refs && 1275 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
@@ -2468,6 +2515,73 @@ static int binder_fixup_parent(struct binder_transaction *t,
2468 return 0; 2515 return 0;
2469} 2516}
2470 2517
2518/**
2519 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2520 * @t: transaction to send
2521 * @proc: process to send the transaction to
2522 * @thread: thread in @proc to send the transaction to (may be NULL)
2523 *
2524 * This function queues a transaction to the specified process. It will try
2525 * to find a thread in the target process to handle the transaction and
2526 * wake it up. If no thread is found, the work is queued to the proc
2527 * waitqueue.
2528 *
2529 * If the @thread parameter is not NULL, the transaction is always queued
2530 * to the waitlist of that specific thread.
2531 *
 2532 * Return: true if the transaction was successfully queued
2533 * false if the target process or thread is dead
2534 */
2535static bool binder_proc_transaction(struct binder_transaction *t,
2536 struct binder_proc *proc,
2537 struct binder_thread *thread)
2538{
2539 struct list_head *target_list = NULL;
2540 struct binder_node *node = t->buffer->target_node;
2541 bool oneway = !!(t->flags & TF_ONE_WAY);
2542 bool wakeup = true;
2543
2544 BUG_ON(!node);
2545 binder_node_lock(node);
2546 if (oneway) {
2547 BUG_ON(thread);
2548 if (node->has_async_transaction) {
2549 target_list = &node->async_todo;
2550 wakeup = false;
2551 } else {
2552 node->has_async_transaction = 1;
2553 }
2554 }
2555
2556 binder_inner_proc_lock(proc);
2557
2558 if (proc->is_dead || (thread && thread->is_dead)) {
2559 binder_inner_proc_unlock(proc);
2560 binder_node_unlock(node);
2561 return false;
2562 }
2563
2564 if (!thread && !target_list)
2565 thread = binder_select_thread_ilocked(proc);
2566
2567 if (thread)
2568 target_list = &thread->todo;
2569 else if (!target_list)
2570 target_list = &proc->todo;
2571 else
2572 BUG_ON(target_list != &node->async_todo);
2573
2574 binder_enqueue_work_ilocked(&t->work, target_list);
2575
2576 if (wakeup)
2577 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2578
2579 binder_inner_proc_unlock(proc);
2580 binder_node_unlock(node);
2581
2582 return true;
2583}
2584
2471static void binder_transaction(struct binder_proc *proc, 2585static void binder_transaction(struct binder_proc *proc,
2472 struct binder_thread *thread, 2586 struct binder_thread *thread,
2473 struct binder_transaction_data *tr, int reply, 2587 struct binder_transaction_data *tr, int reply,
@@ -2482,7 +2596,6 @@ static void binder_transaction(struct binder_proc *proc,
2482 struct binder_proc *target_proc = NULL; 2596 struct binder_proc *target_proc = NULL;
2483 struct binder_thread *target_thread = NULL; 2597 struct binder_thread *target_thread = NULL;
2484 struct binder_node *target_node = NULL; 2598 struct binder_node *target_node = NULL;
2485 struct list_head *target_list;
2486 struct binder_transaction *in_reply_to = NULL; 2599 struct binder_transaction *in_reply_to = NULL;
2487 struct binder_transaction_log_entry *e; 2600 struct binder_transaction_log_entry *e;
2488 uint32_t return_error = 0; 2601 uint32_t return_error = 0;
@@ -2492,7 +2605,6 @@ static void binder_transaction(struct binder_proc *proc,
2492 binder_size_t last_fixup_min_off = 0; 2605 binder_size_t last_fixup_min_off = 0;
2493 struct binder_context *context = proc->context; 2606 struct binder_context *context = proc->context;
2494 int t_debug_id = atomic_inc_return(&binder_last_id); 2607 int t_debug_id = atomic_inc_return(&binder_last_id);
2495 bool wakeup_for_proc_work = false;
2496 2608
2497 e = binder_transaction_log_add(&binder_transaction_log); 2609 e = binder_transaction_log_add(&binder_transaction_log);
2498 e->debug_id = t_debug_id; 2610 e->debug_id = t_debug_id;
@@ -2653,13 +2765,8 @@ static void binder_transaction(struct binder_proc *proc,
2653 } 2765 }
2654 binder_inner_proc_unlock(proc); 2766 binder_inner_proc_unlock(proc);
2655 } 2767 }
2656 if (target_thread) { 2768 if (target_thread)
2657 e->to_thread = target_thread->pid; 2769 e->to_thread = target_thread->pid;
2658 target_list = &target_thread->todo;
2659 } else {
2660 target_list = &target_proc->todo;
2661 wakeup_for_proc_work = true;
2662 }
2663 e->to_proc = target_proc->pid; 2770 e->to_proc = target_proc->pid;
2664 2771
2665 /* TODO: reuse incoming transaction for reply */ 2772 /* TODO: reuse incoming transaction for reply */
@@ -2938,8 +3045,9 @@ static void binder_transaction(struct binder_proc *proc,
2938 } 3045 }
2939 BUG_ON(t->buffer->async_transaction != 0); 3046 BUG_ON(t->buffer->async_transaction != 0);
2940 binder_pop_transaction_ilocked(target_thread, in_reply_to); 3047 binder_pop_transaction_ilocked(target_thread, in_reply_to);
2941 binder_enqueue_work_ilocked(&t->work, target_list); 3048 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
2942 binder_inner_proc_unlock(target_proc); 3049 binder_inner_proc_unlock(target_proc);
3050 wake_up_interruptible_sync(&target_thread->wait);
2943 binder_free_transaction(in_reply_to); 3051 binder_free_transaction(in_reply_to);
2944 } else if (!(t->flags & TF_ONE_WAY)) { 3052 } else if (!(t->flags & TF_ONE_WAY)) {
2945 BUG_ON(t->buffer->async_transaction != 0); 3053 BUG_ON(t->buffer->async_transaction != 0);
@@ -2948,49 +3056,17 @@ static void binder_transaction(struct binder_proc *proc,
2948 t->from_parent = thread->transaction_stack; 3056 t->from_parent = thread->transaction_stack;
2949 thread->transaction_stack = t; 3057 thread->transaction_stack = t;
2950 binder_inner_proc_unlock(proc); 3058 binder_inner_proc_unlock(proc);
2951 binder_inner_proc_lock(target_proc); 3059 if (!binder_proc_transaction(t, target_proc, target_thread)) {
2952 if (target_proc->is_dead ||
2953 (target_thread && target_thread->is_dead)) {
2954 binder_inner_proc_unlock(target_proc);
2955 binder_inner_proc_lock(proc); 3060 binder_inner_proc_lock(proc);
2956 binder_pop_transaction_ilocked(thread, t); 3061 binder_pop_transaction_ilocked(thread, t);
2957 binder_inner_proc_unlock(proc); 3062 binder_inner_proc_unlock(proc);
2958 goto err_dead_proc_or_thread; 3063 goto err_dead_proc_or_thread;
2959 } 3064 }
2960 binder_enqueue_work_ilocked(&t->work, target_list);
2961 binder_inner_proc_unlock(target_proc);
2962 } else { 3065 } else {
2963 BUG_ON(target_node == NULL); 3066 BUG_ON(target_node == NULL);
2964 BUG_ON(t->buffer->async_transaction != 1); 3067 BUG_ON(t->buffer->async_transaction != 1);
2965 binder_node_lock(target_node); 3068 if (!binder_proc_transaction(t, target_proc, NULL))
2966 if (target_node->has_async_transaction) {
2967 target_list = &target_node->async_todo;
2968 wakeup_for_proc_work = false;
2969 } else
2970 target_node->has_async_transaction = 1;
2971 /*
2972 * Test/set of has_async_transaction
2973 * must be atomic with enqueue on
2974 * async_todo
2975 */
2976 binder_inner_proc_lock(target_proc);
2977 if (target_proc->is_dead ||
2978 (target_thread && target_thread->is_dead)) {
2979 binder_inner_proc_unlock(target_proc);
2980 binder_node_unlock(target_node);
2981 goto err_dead_proc_or_thread; 3069 goto err_dead_proc_or_thread;
2982 }
2983 binder_enqueue_work_ilocked(&t->work, target_list);
2984 binder_inner_proc_unlock(target_proc);
2985 binder_node_unlock(target_node);
2986 }
2987 if (target_thread) {
2988 wake_up_interruptible_sync(&target_thread->wait);
2989 } else if (wakeup_for_proc_work) {
2990 binder_inner_proc_lock(target_proc);
2991 binder_wakeup_proc_ilocked(target_proc,
2992 !(tr->flags & TF_ONE_WAY));
2993 binder_inner_proc_unlock(target_proc);
2994 } 3070 }
2995 if (target_thread) 3071 if (target_thread)
2996 binder_thread_dec_tmpref(target_thread); 3072 binder_thread_dec_tmpref(target_thread);
@@ -3435,8 +3511,7 @@ static int binder_thread_write(struct binder_proc *proc,
3435 &ref->death->work, 3511 &ref->death->work,
3436 &proc->todo); 3512 &proc->todo);
3437 binder_wakeup_proc_ilocked( 3513 binder_wakeup_proc_ilocked(
3438 proc, 3514 proc);
3439 false);
3440 binder_inner_proc_unlock(proc); 3515 binder_inner_proc_unlock(proc);
3441 } 3516 }
3442 } 3517 }
@@ -3473,8 +3548,7 @@ static int binder_thread_write(struct binder_proc *proc,
3473 &death->work, 3548 &death->work,
3474 &proc->todo); 3549 &proc->todo);
3475 binder_wakeup_proc_ilocked( 3550 binder_wakeup_proc_ilocked(
3476 proc, 3551 proc);
3477 false);
3478 } 3552 }
3479 } else { 3553 } else {
3480 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 3554 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
@@ -3529,8 +3603,7 @@ static int binder_thread_write(struct binder_proc *proc,
3529 binder_enqueue_work_ilocked( 3603 binder_enqueue_work_ilocked(
3530 &death->work, 3604 &death->work,
3531 &proc->todo); 3605 &proc->todo);
3532 binder_wakeup_proc_ilocked( 3606 binder_wakeup_proc_ilocked(proc);
3533 proc, false);
3534 } 3607 }
3535 } 3608 }
3536 binder_inner_proc_unlock(proc); 3609 binder_inner_proc_unlock(proc);
@@ -4248,7 +4321,7 @@ static int binder_ioctl_write_read(struct file *filp,
4248 trace_binder_read_done(ret); 4321 trace_binder_read_done(ret);
4249 binder_inner_proc_lock(proc); 4322 binder_inner_proc_lock(proc);
4250 if (!binder_worklist_empty_ilocked(&proc->todo)) 4323 if (!binder_worklist_empty_ilocked(&proc->todo))
4251 binder_wakeup_proc_ilocked(proc, false); 4324 binder_wakeup_proc_ilocked(proc);
4252 binder_inner_proc_unlock(proc); 4325 binder_inner_proc_unlock(proc);
4253 if (ret < 0) { 4326 if (ret < 0) {
4254 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 4327 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
@@ -4620,7 +4693,7 @@ static int binder_node_release(struct binder_node *node, int refs)
4620 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 4693 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4621 binder_enqueue_work_ilocked(&ref->death->work, 4694 binder_enqueue_work_ilocked(&ref->death->work,
4622 &ref->proc->todo); 4695 &ref->proc->todo);
4623 binder_wakeup_proc_ilocked(ref->proc, false); 4696 binder_wakeup_proc_ilocked(ref->proc);
4624 binder_inner_proc_unlock(ref->proc); 4697 binder_inner_proc_unlock(ref->proc);
4625 } 4698 }
4626 4699