author		Ingo Molnar <mingo@kernel.org>	2016-03-15 04:00:12 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-03-15 04:01:06 -0400
commit		8bc6782fe20bd2584c73a35c47329c9fd0a8d34c (patch)
tree		c7fc6f467ee212e4ef442e70843c48fcf3c67c17 /kernel/rcu/tree.h
parent		e23604edac2a7be6a8808a5d13fac6b9df4eb9a8 (diff)
parent		3500efae4410454522697c94c23fc40323c0cee9 (diff)
Merge commit 'fixes.2015.02.23a' into core/rcu

Conflicts:
	kernel/rcu/tree.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/rcu/tree.h')
-rw-r--r--	kernel/rcu/tree.h | 42 +++++++++++++++++++++++++++++++-----------
1 file changed, 31 insertions(+), 11 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bbd235d0e71f..df668c0f9e64 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -150,8 +150,9 @@ struct rcu_dynticks {
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
-				/*  rcu_state fields as well as following. */
+	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
+					/*  some rcu_state fields as well as */
+					/*  following. */
 	unsigned long gpnum;	/* Current grace period for this node. */
 				/*  This will either be equal to or one */
 				/*  behind the root rcu_node's gpnum. */
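
For context, the __private annotation and the ACCESS_PRIVATE() accessor used
throughout this patch come from include/linux/compiler.h, added by the sparse
privatization patch in this same series. Roughly, the supporting definitions
look like this (shown for reference only; not part of this diff):

	#ifdef __CHECKER__
	/* Under sparse, __private marks the member noderef, so any direct
	 * dereference of the field is reported; ACCESS_PRIVATE() force-casts
	 * the attribute away for the few places allowed to touch it.
	 */
	# define __private	__attribute__((noderef))
	# define ACCESS_PRIVATE(p, member) \
		(*((typeof((p)->member) __force *) &(p)->member))
	#else
	/* Without sparse, both compile down to a plain member access. */
	# define __private
	# define ACCESS_PRIVATE(p, member) ((p)->member)
	#endif
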
@@ -682,7 +683,7 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #endif /* #else #ifdef CONFIG_PPC */
 
 /*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
  * different lock values, this in turn means that an UNLOCK of one level
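
The ordering problem this comment describes: on weakly ordered architectures
(the CONFIG_PPC case above), an UNLOCK of one rcu_node's lock followed by a
LOCK of a different rcu_node's lock is not by itself a full memory barrier.
A minimal sketch of the pattern the acquire wrappers in the next hunk
encapsulate, assuming hypothetical globals a and b and made-up node names:

	static int a, b;

	/* Hypothetical tree walk: release the child's lock, then take the
	 * parent's.  Without smp_mb__after_unlock_lock(), the store to a and
	 * the load from b could appear reordered to other CPUs, because the
	 * UNLOCK+LOCK pair alone is not a full barrier on, e.g., PowerPC.
	 */
	static int walk_child_to_parent(struct rcu_node *rnp_child,
					struct rcu_node *rnp_parent)
	{
		WRITE_ONCE(a, 1);
		raw_spin_unlock(&ACCESS_PRIVATE(rnp_child, lock));
		raw_spin_lock(&ACCESS_PRIVATE(rnp_parent, lock));
		smp_mb__after_unlock_lock();	/* UNLOCK+LOCK now fully ordered */
		return READ_ONCE(b);
	}
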
@@ -691,29 +692,48 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
  *
  * In order to restore full ordering between tree levels, augment the regular
  * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * As ->lock of struct rcu_node is a __private field, therefore one should use
+ * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
  */
 static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock(&rnp->lock);
+	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
 static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock_irq(&rnp->lock);
+	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
-#define raw_spin_lock_irqsave_rcu_node(rnp, flags) \
-do { \
-	typecheck(unsigned long, flags); \
-	raw_spin_lock_irqsave(&(rnp)->lock, flags); \
-	smp_mb__after_unlock_lock(); \
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
+#define raw_spin_lock_irqsave_rcu_node(rnp, flags) \
+do { \
+	typecheck(unsigned long, flags); \
+	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags); \
+	smp_mb__after_unlock_lock(); \
+} while (0)
+
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags) \
+do { \
+	typecheck(unsigned long, flags); \
+	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags); \
 } while (0)
 
 static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
 {
-	bool locked = raw_spin_trylock(&rnp->lock);
+	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
 
 	if (locked)
 		smp_mb__after_unlock_lock();
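
A minimal usage sketch of the resulting API (hypothetical caller, not from
this patch): with ->lock now __private, all accesses go through the wrappers,
and a direct raw_spin_lock(&rnp->lock) elsewhere in the RCU code would be
flagged by sparse.

	/* Hypothetical caller: update per-node grace-period state under the
	 * lock.  The acquire wrapper supplies smp_mb__after_unlock_lock();
	 * the release wrapper is the only sparse-clean way to unlock the
	 * now-__private ->lock field.
	 */
	static void example_update_node(struct rcu_node *rnp)
	{
		unsigned long flags;

		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WRITE_ONCE(rnp->gpnum, rnp->gpnum + 1);	/* illustrative only */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
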