diff options
author | Todd Kjos <tkjos@android.com> | 2017-06-29 15:02:00 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-07-17 08:48:23 -0400 |
commit | 9630fe8839baf3f47df9187ca720cfa1c10b132e (patch) | |
tree | 884b9d5e174886a29eb66dc76698f70066508f26 /drivers/android/binder.c | |
parent | adc1884222276df6eb018f78bccacbcd78a0b9f6 (diff) |
binder: introduce locking helper functions
There are 3 main spinlocks which must be acquired in this
order:
1) proc->outer_lock : protects most fields of binder_proc,
binder_thread, and binder_ref structures. binder_proc_lock()
and binder_proc_unlock() are used to acq/rel.
2) node->lock : protects most fields of binder_node.
binder_node_lock() and binder_node_unlock() are
used to acq/rel
3) proc->inner_lock : protects the thread and node lists
(proc->threads, proc->nodes) and all todo lists associated
with the binder_proc (proc->todo, thread->todo,
proc->delivered_death and node->async_todo).
binder_inner_proc_lock() and binder_inner_proc_unlock()
are used to acq/rel
Any lock under procA must never be nested under any lock at the same
level or below on procB.
Functions that require a lock held on entry indicate which lock
in the suffix of the function name:
foo_olocked() : requires proc->outer_lock
foo_nlocked() : requires node->lock
foo_ilocked() : requires proc->inner_lock
foo_oilocked(): requires proc->outer_lock and proc->inner_lock
foo_nilocked(): requires node->lock and proc->inner_lock
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/android/binder.c')
-rw-r--r-- | drivers/android/binder.c | 238 |
1 files changed, 238 insertions, 0 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index ec050c6d1192..91fece5c067f 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -15,6 +15,39 @@ | |||
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | /* | ||
19 | * Locking overview | ||
20 | * | ||
21 | * There are 3 main spinlocks which must be acquired in the | ||
22 | * order shown: | ||
23 | * | ||
24 | * 1) proc->outer_lock : protects binder_ref | ||
25 | * binder_proc_lock() and binder_proc_unlock() are | ||
26 | * used to acq/rel. | ||
27 | * 2) node->lock : protects most fields of binder_node. | ||
28 | * binder_node_lock() and binder_node_unlock() are | ||
29 | * used to acq/rel | ||
30 | * 3) proc->inner_lock : protects the thread and node lists | ||
31 | * (proc->threads, proc->nodes) and all todo lists associated | ||
32 | * with the binder_proc (proc->todo, thread->todo, | ||
33 | * proc->delivered_death and node->async_todo). | ||
34 | * binder_inner_proc_lock() and binder_inner_proc_unlock() | ||
35 | * are used to acq/rel | ||
36 | * | ||
37 | * Any lock under procA must never be nested under any lock at the same | ||
38 | * level or below on procB. | ||
39 | * | ||
40 | * Functions that require a lock held on entry indicate which lock | ||
41 | * in the suffix of the function name: | ||
42 | * | ||
43 | * foo_olocked() : requires proc->outer_lock | ||
44 | * foo_nlocked() : requires node->lock | ||
45 | * foo_ilocked() : requires proc->inner_lock | ||
46 | * foo_oilocked(): requires proc->outer_lock and proc->inner_lock | ||
47 | * foo_nilocked(): requires node->lock and proc->inner_lock | ||
48 | * ... | ||
49 | */ | ||
50 | |||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 51 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
19 | 52 | ||
20 | #include <asm/cacheflush.h> | 53 | #include <asm/cacheflush.h> |
@@ -36,6 +69,7 @@ | |||
36 | #include <linux/uaccess.h> | 69 | #include <linux/uaccess.h> |
37 | #include <linux/pid_namespace.h> | 70 | #include <linux/pid_namespace.h> |
38 | #include <linux/security.h> | 71 | #include <linux/security.h> |
72 | #include <linux/spinlock.h> | ||
39 | 73 | ||
40 | #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT | 74 | #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT |
41 | #define BINDER_IPC_32BIT 1 | 75 | #define BINDER_IPC_32BIT 1 |
@@ -106,6 +140,7 @@ enum { | |||
106 | BINDER_DEBUG_FREE_BUFFER = 1U << 11, | 140 | BINDER_DEBUG_FREE_BUFFER = 1U << 11, |
107 | BINDER_DEBUG_INTERNAL_REFS = 1U << 12, | 141 | BINDER_DEBUG_INTERNAL_REFS = 1U << 12, |
108 | BINDER_DEBUG_PRIORITY_CAP = 1U << 13, | 142 | BINDER_DEBUG_PRIORITY_CAP = 1U << 13, |
143 | BINDER_DEBUG_SPINLOCKS = 1U << 14, | ||
109 | }; | 144 | }; |
110 | static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | | 145 | static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | |
111 | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; | 146 | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; |
@@ -262,8 +297,43 @@ struct binder_error { | |||
262 | uint32_t cmd; | 297 | uint32_t cmd; |
263 | }; | 298 | }; |
264 | 299 | ||
300 | /** | ||
301 | * struct binder_node - binder node bookkeeping | ||
302 | * @debug_id: unique ID for debugging | ||
303 | * (invariant after initialized) | ||
304 | * @lock: lock for node fields | ||
305 | * @work: worklist element for node work | ||
306 | * @rb_node: element for proc->nodes tree | ||
307 | * @dead_node: element for binder_dead_nodes list | ||
308 | * (protected by binder_dead_nodes_lock) | ||
309 | * @proc: binder_proc that owns this node | ||
310 | * (invariant after initialized) | ||
311 | * @refs: list of references on this node | ||
312 | * @internal_strong_refs: used to take strong references when | ||
313 | * initiating a transaction | ||
314 | * @local_weak_refs: weak user refs from local process | ||
315 | * @local_strong_refs: strong user refs from local process | ||
316 | * @tmp_refs: temporary kernel refs | ||
317 | * @ptr: userspace pointer for node | ||
318 | * (invariant, no lock needed) | ||
319 | * @cookie: userspace cookie for node | ||
320 | * (invariant, no lock needed) | ||
321 | * @has_strong_ref: userspace notified of strong ref | ||
322 | * @pending_strong_ref: userspace has acked notification of strong ref | ||
323 | * @has_weak_ref: userspace notified of weak ref | ||
324 | * @pending_weak_ref: userspace has acked notification of weak ref | ||
325 | * @has_async_transaction: async transaction to node in progress | ||
326 | * @accept_fds: file descriptor operations supported for node | ||
327 | * (invariant after initialized) | ||
328 | * @min_priority: minimum scheduling priority | ||
329 | * (invariant after initialized) | ||
330 | * @async_todo: list of async work items | ||
331 | * | ||
332 | * Bookkeeping structure for binder nodes. | ||
333 | */ | ||
265 | struct binder_node { | 334 | struct binder_node { |
266 | int debug_id; | 335 | int debug_id; |
336 | spinlock_t lock; | ||
267 | struct binder_work work; | 337 | struct binder_work work; |
268 | union { | 338 | union { |
269 | struct rb_node rb_node; | 339 | struct rb_node rb_node; |
@@ -346,6 +416,51 @@ enum binder_deferred_state { | |||
346 | BINDER_DEFERRED_RELEASE = 0x04, | 416 | BINDER_DEFERRED_RELEASE = 0x04, |
347 | }; | 417 | }; |
348 | 418 | ||
419 | /** | ||
420 | * struct binder_proc - binder process bookkeeping | ||
421 | * @proc_node: element for binder_procs list | ||
422 | * @threads: rbtree of binder_threads in this proc | ||
423 | * @nodes: rbtree of binder nodes associated with | ||
424 | * this proc ordered by node->ptr | ||
425 | * @refs_by_desc: rbtree of refs ordered by ref->desc | ||
426 | * @refs_by_node: rbtree of refs ordered by ref->node | ||
427 | * @pid: PID of group_leader of process | ||
428 | * (invariant after initialized) | ||
429 | * @tsk: task_struct for group_leader of process | ||
430 | * (invariant after initialized) | ||
431 | * @files: files_struct for process | ||
432 | * (invariant after initialized) | ||
433 | * @deferred_work_node: element for binder_deferred_list | ||
434 | * (protected by binder_deferred_lock) | ||
435 | * @deferred_work: bitmap of deferred work to perform | ||
436 | * (protected by binder_deferred_lock) | ||
437 | * @is_dead: process is dead and awaiting free | ||
438 | * when outstanding transactions are cleaned up | ||
439 | * @todo: list of work for this process | ||
440 | * @wait: wait queue head to wait for proc work | ||
441 | * (invariant after initialized) | ||
442 | * @stats: per-process binder statistics | ||
443 | * (atomics, no lock needed) | ||
444 | * @delivered_death: list of delivered death notification | ||
445 | * @max_threads: cap on number of binder threads | ||
446 | * @requested_threads: number of binder threads requested but not | ||
447 | * yet started. In current implementation, can | ||
448 | * only be 0 or 1. | ||
449 | * @requested_threads_started: number binder threads started | ||
450 | * @ready_threads: number of threads waiting for proc work | ||
451 | * @tmp_ref: temporary reference to indicate proc is in use | ||
452 | * @default_priority: default scheduler priority | ||
453 | * (invariant after initialized) | ||
454 | * @debugfs_entry: debugfs node | ||
455 | * @alloc: binder allocator bookkeeping | ||
456 | * @context: binder_context for this proc | ||
457 | * (invariant after initialized) | ||
458 | * @inner_lock: can nest under outer_lock and/or node lock | ||
459 | * @outer_lock: no nesting under inner or node lock | ||
460 | * Lock order: 1) outer, 2) node, 3) inner | ||
461 | * | ||
462 | * Bookkeeping structure for binder processes | ||
463 | */ | ||
349 | struct binder_proc { | 464 | struct binder_proc { |
350 | struct hlist_node proc_node; | 465 | struct hlist_node proc_node; |
351 | struct rb_root threads; | 466 | struct rb_root threads; |
@@ -372,6 +487,8 @@ struct binder_proc { | |||
372 | struct dentry *debugfs_entry; | 487 | struct dentry *debugfs_entry; |
373 | struct binder_alloc alloc; | 488 | struct binder_alloc alloc; |
374 | struct binder_context *context; | 489 | struct binder_context *context; |
490 | spinlock_t inner_lock; | ||
491 | spinlock_t outer_lock; | ||
375 | }; | 492 | }; |
376 | 493 | ||
377 | enum { | 494 | enum { |
@@ -382,6 +499,33 @@ enum { | |||
382 | BINDER_LOOPER_STATE_WAITING = 0x10, | 499 | BINDER_LOOPER_STATE_WAITING = 0x10, |
383 | }; | 500 | }; |
384 | 501 | ||
502 | /** | ||
503 | * struct binder_thread - binder thread bookkeeping | ||
504 | * @proc: binder process for this thread | ||
505 | * (invariant after initialization) | ||
506 | * @rb_node: element for proc->threads rbtree | ||
507 | * @pid: PID for this thread | ||
508 | * (invariant after initialization) | ||
509 | * @looper: bitmap of looping state | ||
510 | * (only accessed by this thread) | ||
511 | * @looper_needs_return: looping thread needs to exit driver | ||
512 | * (no lock needed) | ||
513 | * @transaction_stack: stack of in-progress transactions for this thread | ||
514 | * @todo: list of work to do for this thread | ||
515 | * @return_error: transaction errors reported by this thread | ||
516 | * (only accessed by this thread) | ||
517 | * @reply_error: transaction errors reported by target thread | ||
518 | * @wait: wait queue for thread work | ||
519 | * @stats: per-thread statistics | ||
520 | * (atomics, no lock needed) | ||
521 | * @tmp_ref: temporary reference to indicate thread is in use | ||
522 | * (atomic since @proc->inner_lock cannot | ||
523 | * always be acquired) | ||
524 | * @is_dead: thread is dead and awaiting free | ||
525 | * when outstanding transactions are cleaned up | ||
526 | * | ||
527 | * Bookkeeping structure for binder threads. | ||
528 | */ | ||
385 | struct binder_thread { | 529 | struct binder_thread { |
386 | struct binder_proc *proc; | 530 | struct binder_proc *proc; |
387 | struct rb_node rb_node; | 531 | struct rb_node rb_node; |
@@ -424,6 +568,97 @@ struct binder_transaction { | |||
424 | spinlock_t lock; | 568 | spinlock_t lock; |
425 | }; | 569 | }; |
426 | 570 | ||
571 | /** | ||
572 | * binder_proc_lock() - Acquire outer lock for given binder_proc | ||
573 | * @proc: struct binder_proc to acquire | ||
574 | * | ||
575 | * Acquires proc->outer_lock. Used to protect binder_ref | ||
576 | * structures associated with the given proc. | ||
577 | */ | ||
578 | #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) | ||
579 | static void | ||
580 | _binder_proc_lock(struct binder_proc *proc, int line) | ||
581 | { | ||
582 | binder_debug(BINDER_DEBUG_SPINLOCKS, | ||
583 | "%s: line=%d\n", __func__, line); | ||
584 | spin_lock(&proc->outer_lock); | ||
585 | } | ||
586 | |||
587 | /** | ||
588 | * binder_proc_unlock() - Release spinlock for given binder_proc | ||
589 | * @proc: struct binder_proc to acquire | ||
590 | * | ||
591 | * Release lock acquired via binder_proc_lock() | ||
592 | */ | ||
593 | #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) | ||
594 | static void | ||
595 | _binder_proc_unlock(struct binder_proc *proc, int line) | ||
596 | { | ||
597 | binder_debug(BINDER_DEBUG_SPINLOCKS, | ||
598 | "%s: line=%d\n", __func__, line); | ||
599 | spin_unlock(&proc->outer_lock); | ||
600 | } | ||
601 | |||
602 | /** | ||
603 | * binder_inner_proc_lock() - Acquire inner lock for given binder_proc | ||
604 | * @proc: struct binder_proc to acquire | ||
605 | * | ||
606 | * Acquires proc->inner_lock. Used to protect todo lists | ||
607 | */ | ||
608 | #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) | ||
609 | static void | ||
610 | _binder_inner_proc_lock(struct binder_proc *proc, int line) | ||
611 | { | ||
612 | binder_debug(BINDER_DEBUG_SPINLOCKS, | ||
613 | "%s: line=%d\n", __func__, line); | ||
614 | spin_lock(&proc->inner_lock); | ||
615 | } | ||
616 | |||
617 | /** | ||
618 | * binder_inner_proc_unlock() - Release inner lock for given binder_proc | ||
619 | * @proc: struct binder_proc to acquire | ||
620 | * | ||
621 | * Release lock acquired via binder_inner_proc_lock() | ||
622 | */ | ||
623 | #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) | ||
624 | static void | ||
625 | _binder_inner_proc_unlock(struct binder_proc *proc, int line) | ||
626 | { | ||
627 | binder_debug(BINDER_DEBUG_SPINLOCKS, | ||
628 | "%s: line=%d\n", __func__, line); | ||
629 | spin_unlock(&proc->inner_lock); | ||
630 | } | ||
631 | |||
632 | /** | ||
633 | * binder_node_lock() - Acquire spinlock for given binder_node | ||
634 | * @node: struct binder_node to acquire | ||
635 | * | ||
636 | * Acquires node->lock. Used to protect binder_node fields | ||
637 | */ | ||
638 | #define binder_node_lock(node) _binder_node_lock(node, __LINE__) | ||
639 | static void | ||
640 | _binder_node_lock(struct binder_node *node, int line) | ||
641 | { | ||
642 | binder_debug(BINDER_DEBUG_SPINLOCKS, | ||
643 | "%s: line=%d\n", __func__, line); | ||
644 | spin_lock(&node->lock); | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * binder_node_unlock() - Release spinlock for given binder_node | ||
649 | * @node: struct binder_node to acquire | ||
650 | * | ||
651 | * Release lock acquired via binder_node_lock() | ||
652 | */ | ||
653 | #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) | ||
654 | static void | ||
655 | _binder_node_unlock(struct binder_node *node, int line) | ||
656 | { | ||
657 | binder_debug(BINDER_DEBUG_SPINLOCKS, | ||
658 | "%s: line=%d\n", __func__, line); | ||
659 | spin_unlock(&node->lock); | ||
660 | } | ||
661 | |||
427 | static void | 662 | static void |
428 | binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); | 663 | binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); |
429 | static void binder_free_thread(struct binder_thread *thread); | 664 | static void binder_free_thread(struct binder_thread *thread); |
@@ -568,6 +803,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc, | |||
568 | node->ptr = ptr; | 803 | node->ptr = ptr; |
569 | node->cookie = cookie; | 804 | node->cookie = cookie; |
570 | node->work.type = BINDER_WORK_NODE; | 805 | node->work.type = BINDER_WORK_NODE; |
806 | spin_lock_init(&node->lock); | ||
571 | INIT_LIST_HEAD(&node->work.entry); | 807 | INIT_LIST_HEAD(&node->work.entry); |
572 | INIT_LIST_HEAD(&node->async_todo); | 808 | INIT_LIST_HEAD(&node->async_todo); |
573 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, | 809 | binder_debug(BINDER_DEBUG_INTERNAL_REFS, |
@@ -3599,6 +3835,8 @@ static int binder_open(struct inode *nodp, struct file *filp) | |||
3599 | proc = kzalloc(sizeof(*proc), GFP_KERNEL); | 3835 | proc = kzalloc(sizeof(*proc), GFP_KERNEL); |
3600 | if (proc == NULL) | 3836 | if (proc == NULL) |
3601 | return -ENOMEM; | 3837 | return -ENOMEM; |
3838 | spin_lock_init(&proc->inner_lock); | ||
3839 | spin_lock_init(&proc->outer_lock); | ||
3602 | get_task_struct(current->group_leader); | 3840 | get_task_struct(current->group_leader); |
3603 | proc->tsk = current->group_leader; | 3841 | proc->tsk = current->group_leader; |
3604 | INIT_LIST_HEAD(&proc->todo); | 3842 | INIT_LIST_HEAD(&proc->todo); |