author    Jiri Olsa <jolsa@redhat.com>    2009-07-08 08:10:31 -0400
committer David S. Miller <davem@davemloft.net>    2009-07-09 20:06:58 -0400
commit    ad46276952f1af34cd91d46d49ba13d347d56367 (patch)
tree      55cf35156794ab34d8a607c25fd044c37231f9e4 /include
parent    a57de0b4336e48db2811a2030bb68dba8dd09d88 (diff)
memory barrier: adding smp_mb__after_lock
Add an smp_mb__after_lock define, to be used as an smp_mb() call after
taking a lock.

Make it a nop on x86, since {read|write|spin}_lock() on x86 are full
memory barriers.

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
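The x86 half of this change falls outside the include/-limited diffstat below. As a rough sketch of what the commit message describes (the file placement and exact form here are assumptions, not taken from this page), an architecture whose {read|write|spin}_lock() already act as full memory barriers would define ARCH_HAS_SMP_MB_AFTER_LOCK and make smp_mb__after_lock() a nop:

/*
 * Hypothetical arch-side override, e.g. in arch/x86/include/asm/spinlock.h
 * (assumed location, not shown in this diff): the lock primitives are
 * already full barriers on x86, so no extra barrier instruction is needed.
 */
#define ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { }

The generic code keeps the safe default: without the arch define, smp_mb__after_lock() falls back to a real smp_mb(), as the spinlock.h hunk below shows.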
Diffstat (limited to 'include')
-rw-r--r--    include/linux/spinlock.h    5
-rw-r--r--    include/net/sock.h          5
2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 252b245cfcf4..4be57ab03478 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -132,6 +132,11 @@ do { \
 #endif /*__raw_spin_is_contended*/
 #endif
 
+/* The lock does not imply full memory barrier. */
+#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
+static inline void smp_mb__after_lock(void) { smp_mb(); }
+#endif
+
 /**
  * spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
diff --git a/include/net/sock.h b/include/net/sock.h
index 4eb8409249f6..2c0da9239b95 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1271,6 +1271,9 @@ static inline int sk_has_allocations(const struct sock *sk)
  * in its cache, and so does the tp->rcv_nxt update on CPU2 side. The CPU1
  * could then endup calling schedule and sleep forever if there are no more
  * data on the socket.
+ *
+ * The sk_has_sleeper is always called right after a call to read_lock, so we
+ * can use smp_mb__after_lock barrier.
  */
 static inline int sk_has_sleeper(struct sock *sk)
 {
@@ -1280,7 +1283,7 @@ static inline int sk_has_sleeper(struct sock *sk)
 	 *
 	 * This memory barrier is paired in the sock_poll_wait.
 	 */
-	smp_mb();
+	smp_mb__after_lock();
 	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
 }
 
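For context, a minimal sketch of the caller pattern the new sock.h comment relies on. The function below is illustrative and not part of this patch; it mirrors wakeup helpers such as sock_def_readable, which take sk_callback_lock with read_lock() immediately before checking for sleepers:

#include <net/sock.h>	/* struct sock, sk_has_sleeper(), sk->sk_callback_lock */

/* Illustrative wakeup path (assumed name): sk_has_sleeper() runs right
 * after read_lock(), so the smp_mb__after_lock() inside it supplies the
 * full barrier between the earlier socket-state update and the waitqueue
 * check, without paying for a second smp_mb() on architectures whose lock
 * primitives are already full barriers. */
static void example_readable(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk_has_sleeper(sk))
		wake_up_interruptible(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}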