author | Boqun Feng <boqun.feng@gmail.com> | 2015-12-28 23:18:47 -0500
---|---|---
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2016-02-23 22:59:54 -0500
commit | 67c583a7de3433a971983490b37ad2bff3c55463 (patch) |
tree | b6d5123664992047b393594cff856de006fb86b4 /kernel/rcu/tree.h |
parent | ad315455d396a1cbcb2f9fdd687b7e1b26b789e7 (diff) |
rcu: Privatize rcu_node::lock
In the patch:
"rcu: Add transitivity to remaining rcu_node ->lock acquisitions"
all locking operations on rcu_node::lock were replaced with wrappers
because of the need for transitivity, which means we should never
write code that uses LOCK primitives alone (i.e., without a proper
barrier following them) on rcu_node::lock outside those wrappers. We
can detect this kind of misuse of rcu_node::lock in the future by
adding the __private modifier to rcu_node::lock.
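
The __private annotation and the ACCESS_PRIVATE() accessor come from
the parent commit listed above and live in include/linux/compiler.h.
As a simplified sketch (not part of this patch), they work roughly
like this:

#ifdef __CHECKER__                      /* set when building under sparse */
# define __private      __attribute__((noderef))
# define ACCESS_PRIVATE(p, member) \
        (*((typeof((p)->member) __force *)&(p)->member))
#else                                   /* normal compile: both are no-ops */
# define __private
# define ACCESS_PRIVATE(p, member)      ((p)->member)
#endif

Under sparse, dereferencing a __private field directly (e.g. via
&rnp->lock) triggers a noderef warning, while ACCESS_PRIVATE()
force-casts the annotation away; in a regular build both macros
reduce to a plain member access, so there is no runtime cost.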
To privatize rcu_node::lock, unlock wrappers are also needed.
Replacing raw spinlock unlocks with these wrappers not only privatizes
rcu_node::lock but also makes it easier to identify the critical
sections it protects.
This patch adds the __private modifier to rcu_node::lock and wraps
every access to it in ACCESS_PRIVATE(). In addition, it adds unlock
wrappers and replaces raw_spin_unlock(&rnp->lock) and friends with
them.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
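
Taken together, the wrappers give every rcu_node critical section a
uniform shape. The fragment below is an illustrative caller, not code
from this patch (the function name is hypothetical; the wrapper names
and barrier placement are taken from the diff):

/* Hypothetical caller showing the intended pairing of the wrappers. */
static void example_update_rcu_node(struct rcu_node *rnp)
{
        unsigned long flags;

        /* Acquire: takes ->lock via ACCESS_PRIVATE(), then issues */
        /* smp_mb__after_unlock_lock() to restore full ordering.   */
        raw_spin_lock_irqsave_rcu_node(rnp, flags);

        /* ... read or update fields protected by rcu_node::lock ... */

        /* Release: a plain unlock, again through ACCESS_PRIVATE(). */
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}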
Diffstat (limited to 'kernel/rcu/tree.h')
-rw-r--r-- | kernel/rcu/tree.h | 42
1 file changed, 31 insertions(+), 11 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 83360b4f4352..4886d6a03353 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -149,8 +149,9 @@ struct rcu_dynticks {
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
-				/*  rcu_state fields as well as following. */
+	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
+					/*  some rcu_state fields as well as */
+					/*  following. */
 	unsigned long gpnum;	/* Current grace period for this node. */
 				/*  This will either be equal to or one */
 				/*  behind the root rcu_node's gpnum. */
@@ -680,7 +681,7 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #endif /* #else #ifdef CONFIG_PPC */
 
 /*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
  * different lock values, this in turn means that an UNLOCK of one level
@@ -689,29 +690,48 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
  *
  * In order to restore full ordering between tree levels, augment the regular
  * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * As ->lock of struct rcu_node is a __private field, therefore one should use
+ * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
  */
 static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock(&rnp->lock);
+	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
 static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock_irq(&rnp->lock);
+	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
 #define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
 do {									\
 	typecheck(unsigned long, flags);				\
-	raw_spin_lock_irqsave(&(rnp)->lock, flags);			\
+	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
 	smp_mb__after_unlock_lock();					\
 } while (0)
 
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
+do {									\
+	typecheck(unsigned long, flags);				\
+	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
+} while (0)
+
 static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
 {
-	bool locked = raw_spin_trylock(&rnp->lock);
+	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
 
 	if (locked)
 		smp_mb__after_unlock_lock();
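
A note on the barrier placement visible above: only the acquire
wrappers contain smp_mb__after_unlock_lock(). Tree traversal releases
one node's lock and then acquires a different node's lock, and an
UNLOCK of one lock followed by a LOCK of another is not guaranteed to
act as a full memory barrier on all architectures (hence the
CONFIG_PPC context line). The fragment below is a hypothetical
traversal illustrating this; the function and its shape are invented,
only the wrappers are from the patch:

/* Hypothetical leaf-to-root traversal using only the new wrappers. */
static void example_walk_up(struct rcu_node *rnp_leaf,
                            struct rcu_node *rnp_root)
{
        raw_spin_lock_rcu_node(rnp_leaf);     /* LOCK A, then full barrier */
        /* ... leaf-level updates ... */
        raw_spin_unlock_rcu_node(rnp_leaf);   /* UNLOCK A, no extra barrier */

        /*
         * UNLOCK A followed by LOCK B is not by itself a full barrier
         * on, e.g., PowerPC; the smp_mb__after_unlock_lock() inside
         * the acquire wrapper restores full ordering between levels.
         */
        raw_spin_lock_rcu_node(rnp_root);     /* LOCK B, then full barrier */
        /* ... root-level updates ... */
        raw_spin_unlock_rcu_node(rnp_root);
}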