author     Peter Zijlstra <peterz@infradead.org>    2016-05-24 09:00:38 -0400
committer  Ingo Molnar <mingo@kernel.org>           2016-06-14 05:55:16 -0400
commit     b316ff783d17bd6217804e2e4385ce9347d7dad9 (patch)
tree       3a9fad86e590ef5c74e4e6cc2948265d1a22dea5 /net/netfilter
parent     be3e7844980352756de4261b276ee2ba5be7a26b (diff)
locking/spinlock, netfilter: Fix nf_conntrack_lock() barriers
Even with spin_unlock_wait() fixed, nf_conntrack_lock{,_all}() is broken as it misses a bunch of memory barriers to order the whole global vs. local locks scheme.

Even x86 (and other TSO archs) are affected.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[ Updated the comments. ]
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
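For readers who want to step through the ordering outside the kernel tree, the scheme the patch repairs can be sketched in userspace C11. Every name below is a hypothetical stand-in, not kernel code: pthread spinlocks replace spinlock_t, C11 fences approximate smp_rmb()/smp_mb(), a release store models smp_store_release(), and a lock/unlock pair emulates spin_unlock_wait(), which has no pthread counterpart.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define CONNTRACK_LOCKS 1024

static pthread_spinlock_t locks[CONNTRACK_LOCKS];  /* per-bucket locks */
static pthread_spinlock_t locks_all_lock;          /* global lock */
static atomic_bool locks_all;                      /* nf_conntrack_locks_all */

static void conntrack_locks_init(void)
{
	pthread_spin_init(&locks_all_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < CONNTRACK_LOCKS; i++)
		pthread_spin_init(&locks[i], PTHREAD_PROCESS_PRIVATE);
}

/* nf_conntrack_lock() analogue: take one bucket lock, defer to a global locker. */
static void conntrack_lock(pthread_spinlock_t *lock)
{
	pthread_spin_lock(lock);
	while (atomic_load_explicit(&locks_all, memory_order_relaxed)) {
		pthread_spin_unlock(lock);
		/*
		 * Order the 'locks_all' load vs. the wait on
		 * 'locks_all_lock' below; this is the smp_rmb()
		 * the patch adds:
		 */
		atomic_thread_fence(memory_order_acquire);
		/* Emulate spin_unlock_wait(&locks_all_lock): */
		pthread_spin_lock(&locks_all_lock);
		pthread_spin_unlock(&locks_all_lock);
		pthread_spin_lock(lock);
	}
}

/* nf_conntrack_all_lock() analogue: exclude all bucket-lock holders. */
static void conntrack_all_lock(void)
{
	pthread_spin_lock(&locks_all_lock);
	atomic_store_explicit(&locks_all, true, memory_order_relaxed);
	/*
	 * Order the store of 'locks_all' against the waits on the
	 * bucket locks below; this is the smp_mb() the patch adds:
	 */
	atomic_thread_fence(memory_order_seq_cst);
	for (int i = 0; i < CONNTRACK_LOCKS; i++) {
		/* Emulate spin_unlock_wait(&locks[i]): */
		pthread_spin_lock(&locks[i]);
		pthread_spin_unlock(&locks[i]);
	}
}

/* nf_conntrack_all_unlock() analogue. */
static void conntrack_all_unlock(void)
{
	/*
	 * Release store: all prior stores must be visible before
	 * 'locks_all' reads as false; this is the patch's
	 * smp_store_release():
	 */
	atomic_store_explicit(&locks_all, false, memory_order_release);
	pthread_spin_unlock(&locks_all_lock);
}

The release store in conntrack_all_unlock() carries the weight: per the patch's own comment, a waiter that re-checks the flag and finds it clear must also observe the global-lock holder's entire critical section, which the plain assignment it replaces did not guarantee.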
Diffstat (limited to 'net/netfilter')
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index db2312eeb2a4..b8c5501d3872 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -83,6 +83,13 @@ void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
 	spin_lock(lock);
 	while (unlikely(nf_conntrack_locks_all)) {
 		spin_unlock(lock);
+
+		/*
+		 * Order the 'nf_conntrack_locks_all' load vs. the
+		 * spin_unlock_wait() loads below, to ensure
+		 * that 'nf_conntrack_locks_all_lock' is indeed held:
+		 */
+		smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
 		spin_unlock_wait(&nf_conntrack_locks_all_lock);
 		spin_lock(lock);
 	}
@@ -128,6 +135,14 @@ static void nf_conntrack_all_lock(void)
 	spin_lock(&nf_conntrack_locks_all_lock);
 	nf_conntrack_locks_all = true;
 
+	/*
+	 * Order the above store of 'nf_conntrack_locks_all' against
+	 * the spin_unlock_wait() loads below, such that if
+	 * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
+	 * we must observe nf_conntrack_locks[] held:
+	 */
+	smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+
 	for (i = 0; i < CONNTRACK_LOCKS; i++) {
 		spin_unlock_wait(&nf_conntrack_locks[i]);
 	}
@@ -135,7 +150,13 @@ static void nf_conntrack_all_lock(void)
 
 static void nf_conntrack_all_unlock(void)
 {
-	nf_conntrack_locks_all = false;
+	/*
+	 * All prior stores must be complete before we clear
+	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
+	 * might observe the false value but not the entire
+	 * critical section:
+	 */
+	smp_store_release(&nf_conntrack_locks_all, false);
 	spin_unlock(&nf_conntrack_locks_all_lock);
 }
 