Diffstat (limited to 'drivers/android/binder.c')
-rw-r--r--	drivers/android/binder.c	182
1 file changed, 131 insertions(+), 51 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 9f1000d2a40c..cdfc87629efb 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -72,12 +72,14 @@
 #include <linux/spinlock.h>
 #include <linux/ratelimit.h>
 #include <linux/syscalls.h>
+#include <linux/task_work.h>
 
 #include <uapi/linux/android/binder.h>
 
 #include <asm/cacheflush.h>
 
 #include "binder_alloc.h"
+#include "binder_internal.h"
 #include "binder_trace.h"
 
 static HLIST_HEAD(binder_deferred_list);
@@ -94,22 +96,8 @@ static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
 static atomic_t binder_last_id;
 
-#define BINDER_DEBUG_ENTRY(name) \
-static int binder_##name##_open(struct inode *inode, struct file *file) \
-{ \
-	return single_open(file, binder_##name##_show, inode->i_private); \
-} \
-\
-static const struct file_operations binder_##name##_fops = { \
-	.owner = THIS_MODULE, \
-	.open = binder_##name##_open, \
-	.read = seq_read, \
-	.llseek = seq_lseek, \
-	.release = single_release, \
-}
-
-static int binder_proc_show(struct seq_file *m, void *unused);
-BINDER_DEBUG_ENTRY(proc);
+static int proc_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(proc);
 
 /* This is only defined in include/asm-arm/sizes.h */
 #ifndef SZ_1K
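
For reference, the BINDER_DEBUG_ENTRY() macro removed above is replaced by the generic DEFINE_SHOW_ATTRIBUTE() helper from <linux/seq_file.h>, which generates the same single_open()-based boilerplate. A paraphrased sketch of what DEFINE_SHOW_ATTRIBUTE(proc) expands to (exact layout varies by kernel version):

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

The macro derives proc_open() and proc_fops from the existing proc_show() callback, which is why the debugfs registrations later in this patch switch from the binder_*_fops names to the shorter *_fops names.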
@@ -262,20 +250,6 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
 	return e;
 }
 
-struct binder_context {
-	struct binder_node *binder_context_mgr_node;
-	struct mutex context_mgr_node_lock;
-
-	kuid_t binder_context_mgr_uid;
-	const char *name;
-};
-
-struct binder_device {
-	struct hlist_node hlist;
-	struct miscdevice miscdev;
-	struct binder_context context;
-};
-
 /**
  * struct binder_work - work enqueued on a worklist
  * @entry: node enqueued on list
@@ -660,6 +634,7 @@ struct binder_transaction {
 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
 static void
 _binder_proc_lock(struct binder_proc *proc, int line)
+	__acquires(&proc->outer_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -675,6 +650,7 @@ _binder_proc_lock(struct binder_proc *proc, int line)
 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
 static void
 _binder_proc_unlock(struct binder_proc *proc, int line)
+	__releases(&proc->outer_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -690,6 +666,7 @@ _binder_proc_unlock(struct binder_proc *proc, int line)
 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
 static void
 _binder_inner_proc_lock(struct binder_proc *proc, int line)
+	__acquires(&proc->inner_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -705,6 +682,7 @@ _binder_inner_proc_lock(struct binder_proc *proc, int line)
 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
 static void
 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
+	__releases(&proc->inner_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -720,6 +698,7 @@ _binder_inner_proc_unlock(struct binder_proc *proc, int line)
 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
 static void
 _binder_node_lock(struct binder_node *node, int line)
+	__acquires(&node->lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -735,6 +714,7 @@ _binder_node_lock(struct binder_node *node, int line)
 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
 static void
 _binder_node_unlock(struct binder_node *node, int line)
+	__releases(&node->lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -751,12 +731,16 @@ _binder_node_unlock(struct binder_node *node, int line)
 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
 static void
 _binder_node_inner_lock(struct binder_node *node, int line)
+	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
 	spin_lock(&node->lock);
 	if (node->proc)
 		binder_inner_proc_lock(node->proc);
+	else
+		/* annotation for sparse */
+		__acquire(&node->proc->inner_lock);
 }
 
 /**
@@ -768,6 +752,7 @@ _binder_node_inner_lock(struct binder_node *node, int line)
 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
 static void
 _binder_node_inner_unlock(struct binder_node *node, int line)
+	__releases(&node->lock) __releases(&node->proc->inner_lock)
 {
 	struct binder_proc *proc = node->proc;
 
@@ -775,6 +760,9 @@ _binder_node_inner_unlock(struct binder_node *node, int line)
 		     "%s: line=%d\n", __func__, line);
 	if (proc)
 		binder_inner_proc_unlock(proc);
+	else
+		/* annotation for sparse */
+		__release(&node->proc->inner_lock);
 	spin_unlock(&node->lock);
 }
 
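
For context, the __acquires()/__releases() function annotations and the bare __acquire()/__release() statements added above are sparse lock-context annotations: they compile to nothing, but let a `make C=1` (sparse) build check that every path through a helper leaves the lock context balanced, including branches that never really take the lock. A minimal sketch of the pattern, using a hypothetical struct foo rather than binder's types:

#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
};

/* Tell sparse the lock is held when this wrapper returns. */
static void foo_lock(struct foo *f)
	__acquires(&f->lock)
{
	spin_lock(&f->lock);
}

/* Conditional path: balance the context with a no-op __acquire(). */
static void foo_lock_maybe(struct foo *f)
	__acquires(&f->lock)
{
	if (f)
		spin_lock(&f->lock);
	else
		/* annotation only; nothing is actually locked here */
		__acquire(&f->lock);
}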
@@ -1384,10 +1372,14 @@ static void binder_dec_node_tmpref(struct binder_node *node)
 	binder_node_inner_lock(node);
 	if (!node->proc)
 		spin_lock(&binder_dead_nodes_lock);
+	else
+		__acquire(&binder_dead_nodes_lock);
 	node->tmp_refs--;
 	BUG_ON(node->tmp_refs < 0);
 	if (!node->proc)
 		spin_unlock(&binder_dead_nodes_lock);
+	else
+		__release(&binder_dead_nodes_lock);
 	/*
 	 * Call binder_dec_node() to check if all refcounts are 0
 	 * and cleanup is needed. Calling with strong=0 and internal=1
@@ -1890,18 +1882,22 @@ static struct binder_thread *binder_get_txn_from(
  */
 static struct binder_thread *binder_get_txn_from_and_acq_inner(
 		struct binder_transaction *t)
+	__acquires(&t->from->proc->inner_lock)
 {
 	struct binder_thread *from;
 
 	from = binder_get_txn_from(t);
-	if (!from)
+	if (!from) {
+		__acquire(&from->proc->inner_lock);
 		return NULL;
+	}
 	binder_inner_proc_lock(from->proc);
 	if (t->from) {
 		BUG_ON(from != t->from);
 		return from;
 	}
 	binder_inner_proc_unlock(from->proc);
+	__acquire(&from->proc->inner_lock);
 	binder_thread_dec_tmpref(from);
 	return NULL;
 }
@@ -1973,6 +1969,8 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 			binder_thread_dec_tmpref(target_thread);
 			binder_free_transaction(t);
 			return;
+		} else {
+			__release(&target_thread->proc->inner_lock);
 		}
 		next = t->from_parent;
 
@@ -2160,6 +2158,64 @@ static bool binder_validate_fixup(struct binder_buffer *b,
 	return (fixup_offset >= last_min_offset);
 }
 
+/**
+ * struct binder_task_work_cb - for deferred close
+ *
+ * @twork: callback_head for task work
+ * @fd: fd to close
+ *
+ * Structure to pass task work to be handled after
+ * returning from binder_ioctl() via task_work_add().
+ */
+struct binder_task_work_cb {
+	struct callback_head twork;
+	struct file *file;
+};
+
+/**
+ * binder_do_fd_close() - close list of file descriptors
+ * @twork: callback head for task work
+ *
+ * It is not safe to call ksys_close() during the binder_ioctl()
+ * function if there is a chance that binder's own file descriptor
+ * might be closed. This is to meet the requirements for using
+ * fdget() (see comments for __fget_light()). Therefore use
+ * task_work_add() to schedule the close operation once we have
+ * returned from binder_ioctl(). This function is a callback
+ * for that mechanism and does the actual ksys_close() on the
+ * given file descriptor.
+ */
+static void binder_do_fd_close(struct callback_head *twork)
+{
+	struct binder_task_work_cb *twcb = container_of(twork,
+			struct binder_task_work_cb, twork);
+
+	fput(twcb->file);
+	kfree(twcb);
+}
+
+/**
+ * binder_deferred_fd_close() - schedule a close for the given file-descriptor
+ * @fd: file-descriptor to close
+ *
+ * See comments in binder_do_fd_close(). This function is used to schedule
+ * a file-descriptor to be closed after returning from binder_ioctl().
+ */
+static void binder_deferred_fd_close(int fd)
+{
+	struct binder_task_work_cb *twcb;
+
+	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
+	if (!twcb)
+		return;
+	init_task_work(&twcb->twork, binder_do_fd_close);
+	__close_fd_get_file(fd, &twcb->file);
+	if (twcb->file)
+		task_work_add(current, &twcb->twork, true);
+	else
+		kfree(twcb);
+}
+
 static void binder_transaction_buffer_release(struct binder_proc *proc,
 					      struct binder_buffer *buffer,
 					      binder_size_t *failed_at)
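
For context, the helpers added above defer the final fput() via task_work_add() so that it runs only after binder_ioctl() has returned; closing binder's own file descriptor inside the ioctl would break the assumptions behind fdget()/__fget_light(). A stripped-down sketch of the same deferral pattern with hypothetical names, assuming the task_work_add(task, work, notify) signature of this kernel generation:

#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

struct deferred_fput {
	struct callback_head twork;
	struct file *file;
};

/* Runs in task context once the task is about to return to user space. */
static void deferred_fput_fn(struct callback_head *twork)
{
	struct deferred_fput *d = container_of(twork, struct deferred_fput, twork);

	fput(d->file);
	kfree(d);
}

/* Queue fput(file) for when the current task leaves the kernel. */
static int schedule_deferred_fput(struct file *file)
{
	struct deferred_fput *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	d->file = file;
	init_task_work(&d->twork, deferred_fput_fn);
	return task_work_add(current, &d->twork, true);
}

A real caller would still want to handle task_work_add() failing (for example when the task is already exiting) by dropping the reference and freeing the container directly.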
@@ -2299,7 +2355,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 		}
 		fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
 		for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
-			ksys_close(fd_array[fd_index]);
+			binder_deferred_fd_close(fd_array[fd_index]);
 	} break;
 	default:
 		pr_err("transaction release %d bad object type %x\n",
@@ -2394,11 +2450,15 @@ static int binder_translate_handle(struct flat_binder_object *fp,
 		fp->cookie = node->cookie;
 		if (node->proc)
 			binder_inner_proc_lock(node->proc);
+		else
+			__acquire(&node->proc->inner_lock);
 		binder_inc_node_nilocked(node,
 					 fp->hdr.type == BINDER_TYPE_BINDER,
 					 0, NULL);
 		if (node->proc)
 			binder_inner_proc_unlock(node->proc);
+		else
+			__release(&node->proc->inner_lock);
 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
 		binder_debug(BINDER_DEBUG_TRANSACTION,
 			     " ref %d desc %d -> node %d u%016llx\n",
@@ -2762,6 +2822,8 @@ static void binder_transaction(struct binder_proc *proc,
 		binder_set_nice(in_reply_to->saved_priority);
 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
 		if (target_thread == NULL) {
+			/* annotation for sparse */
+			__release(&target_thread->proc->inner_lock);
 			return_error = BR_DEAD_REPLY;
 			return_error_line = __LINE__;
 			goto err_dead_binder;
@@ -3912,7 +3974,7 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
 		} else if (ret) {
 			u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
 
-			ksys_close(*fdp);
+			binder_deferred_fd_close(*fdp);
 		}
 		list_del(&fixup->fixup_entry);
 		kfree(fixup);
@@ -4164,6 +4226,11 @@ retry:
 			if (cmd == BR_DEAD_BINDER)
 				goto done; /* DEAD_BINDER notifications can cause transactions */
 		} break;
+		default:
+			binder_inner_proc_unlock(proc);
+			pr_err("%d:%d: bad work type %d\n",
+			       proc->pid, thread->pid, w->type);
+			break;
 		}
 
 		if (!t)
@@ -4467,6 +4534,8 @@ static int binder_thread_release(struct binder_proc *proc,
 		spin_lock(&t->lock);
 		if (t->to_thread == thread)
 			send_reply = t;
+	} else {
+		__acquire(&t->lock);
 	}
 	thread->is_dead = true;
 
@@ -4495,7 +4564,11 @@ static int binder_thread_release(struct binder_proc *proc,
 		spin_unlock(&last_t->lock);
 		if (t)
 			spin_lock(&t->lock);
+		else
+			__acquire(&t->lock);
 	}
+	/* annotation for sparse, lock not acquired in last iteration above */
+	__release(&t->lock);
 
 	/*
 	 * If this thread used poll, make sure we remove the waitqueue
@@ -4938,8 +5011,12 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	proc->tsk = current->group_leader;
 	INIT_LIST_HEAD(&proc->todo);
 	proc->default_priority = task_nice(current);
-	binder_dev = container_of(filp->private_data, struct binder_device,
-				  miscdev);
+	/* binderfs stashes devices in i_private */
+	if (is_binderfs_device(nodp))
+		binder_dev = nodp->i_private;
+	else
+		binder_dev = container_of(filp->private_data,
+					  struct binder_device, miscdev);
 	proc->context = &binder_dev->context;
 	binder_alloc_init(&proc->alloc);
 
@@ -4967,7 +5044,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
 			binder_debugfs_dir_entry_proc,
 			(void *)(unsigned long)proc->pid,
-			&binder_proc_fops);
+			&proc_fops);
 	}
 
 	return 0;
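
For context, is_binderfs_device() used above comes from the binder_internal.h header included earlier in this patch; it lets binder_open() distinguish a binderfs inode (where binderfs stashes the struct binder_device in i_private) from the classic misc-device path. A plausible sketch of such a check, assuming the BINDERFS_SUPER_MAGIC constant (the real helper lives in binder_internal.h and is expected to fall back to returning false when CONFIG_ANDROID_BINDERFS is off):

#include <linux/fs.h>
#include <linux/magic.h>	/* BINDERFS_SUPER_MAGIC (assumption) */

static inline bool is_binderfs_device(const struct inode *inode)
{
	/* binderfs inodes are recognised by their superblock magic */
	return inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC;
}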
@@ -5391,6 +5468,9 @@ static void print_binder_proc(struct seq_file *m,
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
 		struct binder_node *node = rb_entry(n, struct binder_node,
 						    rb_node);
+		if (!print_all && !node->has_async_transaction)
+			continue;
+
 		/*
 		 * take a temporary reference on the node so it
 		 * survives and isn't removed from the tree
@@ -5595,7 +5675,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 }
 
 
-static int binder_state_show(struct seq_file *m, void *unused)
+static int state_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
 	struct binder_node *node;
@@ -5634,7 +5714,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int binder_stats_show(struct seq_file *m, void *unused)
+static int stats_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
 
@@ -5650,7 +5730,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int binder_transactions_show(struct seq_file *m, void *unused)
+static int transactions_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
 
@@ -5663,7 +5743,7 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int binder_proc_show(struct seq_file *m, void *unused)
+static int proc_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *itr;
 	int pid = (unsigned long)m->private;
@@ -5706,7 +5786,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
 		   "\n" : " (incomplete)\n");
 }
 
-static int binder_transaction_log_show(struct seq_file *m, void *unused)
+static int transaction_log_show(struct seq_file *m, void *unused)
 {
 	struct binder_transaction_log *log = m->private;
 	unsigned int log_cur = atomic_read(&log->cur);
@@ -5727,7 +5807,7 @@ static int binder_transaction_log_show(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static const struct file_operations binder_fops = {
+const struct file_operations binder_fops = {
 	.owner = THIS_MODULE,
 	.poll = binder_poll,
 	.unlocked_ioctl = binder_ioctl,
@@ -5738,10 +5818,10 @@ static const struct file_operations binder_fops = {
 	.release = binder_release,
 };
 
-BINDER_DEBUG_ENTRY(state);
-BINDER_DEBUG_ENTRY(stats);
-BINDER_DEBUG_ENTRY(transactions);
-BINDER_DEBUG_ENTRY(transaction_log);
+DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(stats);
+DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transaction_log);
 
 static int __init init_binder_device(const char *name)
 {
@@ -5795,27 +5875,27 @@ static int __init binder_init(void)
 				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
-				    &binder_state_fops);
+				    &state_fops);
 		debugfs_create_file("stats",
 				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
-				    &binder_stats_fops);
+				    &stats_fops);
 		debugfs_create_file("transactions",
 				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
-				    &binder_transactions_fops);
+				    &transactions_fops);
 		debugfs_create_file("transaction_log",
 				    0444,
 				    binder_debugfs_dir_entry_root,
 				    &binder_transaction_log,
-				    &binder_transaction_log_fops);
+				    &transaction_log_fops);
 		debugfs_create_file("failed_transaction_log",
 				    0444,
 				    binder_debugfs_dir_entry_root,
 				    &binder_transaction_log_failed,
-				    &binder_transaction_log_fops);
+				    &transaction_log_fops);
 	}
 
 	/*