path: root/kernel/rcu/tree.h
author	Peter Zijlstra <peterz@infradead.org>	2015-10-08 06:24:23 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-11-23 13:37:35 -0500
commit	2a67e741bbbc022e0fadf8c6dbc3a76019ecd0cf (patch)
tree	2f06a10ebf9f27272f6de99b0da5c19c5bdde6f6 /kernel/rcu/tree.h
parent	1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff)
rcu: Create transitive rnp->lock acquisition functions
Providing RCU's memory-ordering guarantees requires that the rcu_node tree's locking provide transitive memory ordering, which the Linux kernel's spinlocks currently do not provide unless smp_mb__after_unlock_lock() is used. Having a separate smp_mb__after_unlock_lock() after each and every lock acquisition is error-prone, hard to read, and a bit annoying, so this commit provides wrapper functions that pull in the smp_mb__after_unlock_lock() invocations.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
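For illustration only (this call site is hypothetical and not taken from the patch), the wrappers replace the error-prone open-coded pattern:

	/* Before: the caller must remember the extra barrier after each acquisition. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();

	/* After: the wrapper supplies the full-barrier semantics implicitly. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);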
Diffstat (limited to 'kernel/rcu/tree.h')
-rw-r--r--	kernel/rcu/tree.h	| 39
1 file changed, 39 insertions, 0 deletions
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 9fb4e238d4dc..f32bebb6bc90 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -664,3 +664,42 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #else /* #ifdef CONFIG_PPC */
 #define smp_mb__after_unlock_lock() do { } while (0)
 #endif /* #else #ifdef CONFIG_PPC */
+
+/*
+ * Wrappers for the rcu_node::lock acquire.
+ *
+ * Because the rcu_nodes form a tree, the tree traversal locking will observe
+ * different lock values, this in turn means that an UNLOCK of one level
+ * followed by a LOCK of another level does not imply a full memory barrier;
+ * and most importantly transitivity is lost.
+ *
+ * In order to restore full ordering between tree levels, augment the regular
+ * lock acquire functions with smp_mb__after_unlock_lock().
+ */
+static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_lock(&rnp->lock);
+	smp_mb__after_unlock_lock();
+}
+
+static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_lock_irq(&rnp->lock);
+	smp_mb__after_unlock_lock();
+}
+
+#define raw_spin_lock_irqsave_rcu_node(rnp, flags)	\
+do {							\
+	typecheck(unsigned long, flags);		\
+	raw_spin_lock_irqsave(&(rnp)->lock, flags);	\
+	smp_mb__after_unlock_lock();			\
+} while (0)
+
+static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
+{
+	bool locked = raw_spin_trylock(&rnp->lock);
+
+	if (locked)
+		smp_mb__after_unlock_lock();
+	return locked;
+}
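A design note: the irqsave variant is a macro rather than an inline function because raw_spin_lock_irqsave() must store into the caller's flags variable, and typecheck() rejects anything other than an unsigned long. A minimal usage sketch follows; the surrounding function is an assumption for illustration and does not appear in this patch:

	static void example_update_rcu_node(struct rcu_node *rnp)
	{
		unsigned long flags;

		/* Acquire rnp->lock with the implied full memory barrier. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		/* ... update fields that rely on transitive ordering ... */
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}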