author:    Arve Hjønnevåg <arve@android.com>    2009-04-06 18:13:00 -0400
committer: Greg Kroah-Hartman <gregkh@suse.de>  2009-04-17 14:06:27 -0400
commit:    861a0dcc397f3e94024cc311086799393ec517df
tree:      cb0cd51f01acc1d35693df79707e533cae39139f
parent:    0cf24a7dc9123ddf63c413b6d4b38017b19db713
Staging: binder: Defer flush and release operations to avoid deadlocks.
If a transaction that contains a file descriptor fails on a later object, the new file descriptor needs to be closed. If this is a binder file descriptor, we would deadlock in flush. If there were no other references to the file at this point, release would also be called.

Signed-off-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
 drivers/staging/android/binder.c | 149
 1 files changed, 88 insertions(+), 61 deletions(-)
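[Editor's note] The pattern this patch introduces can be summarized as: the flush and release handlers no longer do the heavy work directly (the old handlers took binder_lock, so closing a binder file descriptor from inside binder while that lock was held would deadlock). Instead they record a bit in a per-process bitmask, queue the process on a global list, and schedule a work item that performs the deferred operations later in workqueue context, outside the caller's locks. The following is a condensed, illustrative sketch of that mechanism, not the driver source; names such as example_proc, defer_work and the DEFERRED_* flags are placeholders standing in for binder_proc, binder_defer_work and the BINDER_DEFERRED_* flags in the diff below.

/* Illustrative sketch only -- simplified from the pattern added by this patch. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

enum {
	DEFERRED_FLUSH   = 0x01,	/* stand-in for BINDER_DEFERRED_FLUSH */
	DEFERRED_RELEASE = 0x02,	/* stand-in for BINDER_DEFERRED_RELEASE */
};

struct example_proc {			/* stand-in for struct binder_proc */
	struct hlist_node deferred_work_node;
	int deferred_work;		/* bitmask of pending deferred work */
};

static HLIST_HEAD(deferred_list);
static DEFINE_MUTEX(deferred_lock);

static void deferred_func(struct work_struct *work);
static DECLARE_WORK(deferred_work, deferred_func);

/* Cheap, non-blocking side: called from the flush()/release() handlers. */
static void defer_work(struct example_proc *proc, int defer)
{
	mutex_lock(&deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node, &deferred_list);
		schedule_work(&deferred_work);
	}
	mutex_unlock(&deferred_lock);
}

/* Workqueue side: drains the list, handling one process per iteration. */
static void deferred_func(struct work_struct *work)
{
	struct example_proc *proc;
	int defer;

	do {
		mutex_lock(&deferred_lock);
		if (!hlist_empty(&deferred_list)) {
			proc = hlist_entry(deferred_list.first,
					   struct example_proc,
					   deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&deferred_lock);

		if (defer & DEFERRED_FLUSH)
			pr_info("deferred flush work would run here\n");
		if (defer & DEFERRED_RELEASE)
			pr_info("deferred release work would run here\n");
	} while (proc);
}

In the real patch the work function additionally takes binder_lock around the deferred flush/release and drops it before put_files_struct(), so closing a binder fd during a failed transaction can no longer re-enter binder_flush() while binder_lock is held.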
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index b0127a3290d0..299d29d1dadb 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -41,8 +41,8 @@ static int binder_last_id;
 static struct proc_dir_entry *binder_proc_dir_entry_root;
 static struct proc_dir_entry *binder_proc_dir_entry_proc;
 static struct hlist_head binder_dead_nodes;
-static HLIST_HEAD(binder_release_files_list);
-static DEFINE_MUTEX(binder_release_files_lock);
+static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_deferred_lock);
 
 static int binder_read_proc_proc(
 	char *page, char **start, off_t off, int count, int *eof, void *data);
@@ -234,6 +234,12 @@ struct binder_buffer {
 	uint8_t data[0];
 };
 
+enum {
+	BINDER_DEFERRED_PUT_FILES = 0x01,
+	BINDER_DEFERRED_FLUSH = 0x02,
+	BINDER_DEFERRED_RELEASE = 0x04,
+};
+
 struct binder_proc {
 	struct hlist_node proc_node;
 	struct rb_root threads;
@@ -244,7 +250,8 @@ struct binder_proc {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct files_struct *files;
-	struct hlist_node release_files_node;
+	struct hlist_node deferred_work_node;
+	int deferred_work;
 	void *buffer;
 	ptrdiff_t user_buffer_offset;
 
@@ -310,6 +317,8 @@ struct binder_transaction {
 	uid_t sender_euid;
 };
 
+static void binder_defer_work(struct binder_proc *proc, int defer);
+
 /*
  * copied from get_unused_fd_flags
  */
@@ -2677,33 +2686,6 @@ static void binder_vma_open(struct vm_area_struct *vma)
 	dump_stack();
 }
 
-static void binder_release_files(struct work_struct *work)
-{
-	struct binder_proc *proc;
-	struct files_struct *files;
-	do {
-		mutex_lock(&binder_lock);
-		mutex_lock(&binder_release_files_lock);
-		if (!hlist_empty(&binder_release_files_list)) {
-			proc = hlist_entry(binder_release_files_list.first,
-					struct binder_proc, release_files_node);
-			hlist_del_init(&proc->release_files_node);
-			files = proc->files;
-			if (files)
-				proc->files = NULL;
-		} else {
-			proc = NULL;
-			files = NULL;
-		}
-		mutex_unlock(&binder_release_files_lock);
-		mutex_unlock(&binder_lock);
-		if (files)
-			put_files_struct(files);
-	} while (proc);
-}
-
-static DECLARE_WORK(binder_release_files_work, binder_release_files);
-
 static void binder_vma_close(struct vm_area_struct *vma)
 {
 	struct binder_proc *proc = vma->vm_private_data;
@@ -2714,13 +2696,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
 		(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		(unsigned long)pgprot_val(vma->vm_page_prot));
 	proc->vma = NULL;
-	mutex_lock(&binder_release_files_lock);
-	if (proc->files) {
-		hlist_add_head(&proc->release_files_node,
-				&binder_release_files_list);
-		schedule_work(&binder_release_files_work);
-	}
-	mutex_unlock(&binder_release_files_lock);
+	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
 static struct vm_operations_struct binder_vm_ops = {
@@ -2853,11 +2829,17 @@ static int binder_open(struct inode *nodp, struct file *filp)
 
 static int binder_flush(struct file *filp, fl_owner_t id)
 {
-	struct rb_node *n;
 	struct binder_proc *proc = filp->private_data;
-	int wake_count = 0;
 
-	mutex_lock(&binder_lock);
+	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
+
+	return 0;
+}
+
+static void binder_deferred_flush(struct binder_proc *proc)
+{
+	struct rb_node *n;
+	int wake_count = 0;
 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
 		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
@@ -2867,36 +2849,34 @@ static int binder_flush(struct file *filp, fl_owner_t id)
 		}
 	}
 	wake_up_interruptible_all(&proc->wait);
-	mutex_unlock(&binder_lock);
 
 	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
 		printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count);
-
-	return 0;
 }
 
 static int binder_release(struct inode *nodp, struct file *filp)
 {
-	struct hlist_node *pos;
-	struct binder_transaction *t;
-	struct rb_node *n;
-	struct files_struct *files;
 	struct binder_proc *proc = filp->private_data;
-	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
-
 	if (binder_proc_dir_entry_proc) {
 		char strbuf[11];
 		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
 		remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
 	}
-	mutex_lock(&binder_lock);
-	mutex_lock(&binder_release_files_lock);
-	if (!hlist_unhashed(&proc->release_files_node))
-		hlist_del(&proc->release_files_node);
-	files = proc->files;
-	if (files)
-		proc->files = NULL;
-	mutex_unlock(&binder_release_files_lock);
+
+	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+
+	return 0;
+}
+
+static void binder_deferred_release(struct binder_proc *proc)
+{
+	struct hlist_node *pos;
+	struct binder_transaction *t;
+	struct rb_node *n;
+	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
+
+	BUG_ON(proc->vma);
+	BUG_ON(proc->files);
 
 	hlist_del(&proc->proc_node);
 	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
@@ -2971,7 +2951,6 @@ static int binder_release(struct inode *nodp, struct file *filp)
 	}
 
 	binder_stats.obj_deleted[BINDER_STAT_PROC]++;
-	mutex_unlock(&binder_lock);
 
 	page_count = 0;
 	if (proc->pages) {
@@ -2995,9 +2974,57 @@ static int binder_release(struct inode *nodp, struct file *filp)
 		proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count);
 
 	kfree(proc);
-	if (files)
-		put_files_struct(files);
-	return 0;
+}
+
+static void binder_deferred_func(struct work_struct *work)
+{
+	struct binder_proc *proc;
+	struct files_struct *files;
+
+	int defer;
+	do {
+		mutex_lock(&binder_lock);
+		mutex_lock(&binder_deferred_lock);
+		if (!hlist_empty(&binder_deferred_list)) {
+			proc = hlist_entry(binder_deferred_list.first,
+					struct binder_proc, deferred_work_node);
+			hlist_del_init(&proc->deferred_work_node);
+			defer = proc->deferred_work;
+			proc->deferred_work = 0;
+		} else {
+			proc = NULL;
+			defer = 0;
+		}
+		mutex_unlock(&binder_deferred_lock);
+
+		files = NULL;
+		if (defer & BINDER_DEFERRED_PUT_FILES)
+			if ((files = proc->files))
+				proc->files = NULL;
+
+		if (defer & BINDER_DEFERRED_FLUSH)
+			binder_deferred_flush(proc);
+
+		if (defer & BINDER_DEFERRED_RELEASE)
+			binder_deferred_release(proc); /* frees proc */
+
+		mutex_unlock(&binder_lock);
+		if (files)
+			put_files_struct(files);
+	} while (proc);
+}
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
+
+static void binder_defer_work(struct binder_proc *proc, int defer)
+{
+	mutex_lock(&binder_deferred_lock);
+	proc->deferred_work |= defer;
+	if (hlist_unhashed(&proc->deferred_work_node)) {
+		hlist_add_head(&proc->deferred_work_node,
+				&binder_deferred_list);
+		schedule_work(&binder_deferred_work);
+	}
+	mutex_unlock(&binder_deferred_lock);
 }
 
 static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t)