path: root/net/core
author    Eric Dumazet <eric.dumazet@gmail.com>  2010-05-26 15:20:18 -0400
committer David S. Miller <davem@davemloft.net>  2010-05-27 03:30:53 -0400
commit    8a74ad60a546b13bd1096b2a61a7a5c6fd9ae17c (patch)
tree      3110e7e59883597b5d0f617e8507e15b8f965f3f /net/core
parent    a56635a56f2afb3d22d9ce07e8f8d69537416b2d (diff)
net: fix lock_sock_bh/unlock_sock_bh
This new sock lock primitive was introduced to speed up some user-context socket manipulation. But it is unsafe to protect two threads, one using regular lock_sock/release_sock, the other using lock_sock_bh/unlock_sock_bh.

This patch changes lock_sock_bh to be careful about the 'owned' state. If owned is found to be set, we must take the slow path. lock_sock_bh() now returns a boolean to say if the slow path was taken, and this boolean is used at unlock_sock_bh time to call the appropriate unlock function.

After this change, BH are either disabled or enabled during the lock_sock_bh/unlock_sock_bh protected section. This might be misleading, so we rename these functions to lock_sock_fast()/unlock_sock_fast().

Reported-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Tested-by: Anton Blanchard <anton@samba.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
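In caller terms the new primitive works as in the minimal sketch below; the surrounding function is hypothetical, and only lock_sock_fast(), unlock_sock_fast() and sk_mem_reclaim_partial() come from the kernel itself. The net/core/datagram.c hunk further down applies exactly this pattern.

/* Hypothetical caller sketching the new pattern: the bool returned by
 * lock_sock_fast() records whether the slow path was taken, and must be
 * handed back to unlock_sock_fast() so the matching unlock is used.
 */
static void example_reclaim(struct sock *sk)
{
        bool slow = lock_sock_fast(sk);

        /* very short critical section; must not block */
        sk_mem_reclaim_partial(sk);

        unlock_sock_fast(sk, slow);
}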
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c |  6
-rw-r--r--  net/core/sock.c     | 33
2 files changed, 37 insertions, 2 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531417a..f5b6f43a4c2e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
 
 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
 {
+        bool slow;
+
         if (likely(atomic_read(&skb->users) == 1))
                 smp_rmb();
         else if (likely(!atomic_dec_and_test(&skb->users)))
                 return;
 
-        lock_sock_bh(sk);
+        slow = lock_sock_fast(sk);
         skb_orphan(skb);
         sk_mem_reclaim_partial(sk);
-        unlock_sock_bh(sk);
+        unlock_sock_fast(sk, slow);
 
         /* skb is now orphaned, can be freed outside of locked section */
         __kfree_skb(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index 37fe9b6adade..2cf7f9f7e775 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2007,6 +2007,39 @@ void release_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(release_sock);
 
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small sections, where the process
+ * won't block.
+ * Returns false if the fast path is taken:
+ *   sk_lock.slock locked, owned = 0, BH disabled
+ * Returns true if the slow path is taken:
+ *   sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+bool lock_sock_fast(struct sock *sk)
+{
+        might_sleep();
+        spin_lock_bh(&sk->sk_lock.slock);
+
+        if (!sk->sk_lock.owned)
+                /*
+                 * Note: fast path, BH stay disabled and slock stays held
+                 */
+                return false;
+
+        __lock_sock(sk);
+        sk->sk_lock.owned = 1;
+        spin_unlock(&sk->sk_lock.slock);
+        /*
+         * The sk_lock has mutex_lock() semantics here:
+         */
+        mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+        local_bh_enable();
+        return true;
+}
+EXPORT_SYMBOL(lock_sock_fast);
+
 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
 {
         struct timeval tv;
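The matching unlock_sock_fast() does not appear in this net/core-limited view; the following is a minimal sketch consistent with the semantics described above, assuming it is a static inline helper kept with the other sock lock primitives in include/net/sock.h.

/* Sketch of the counterpart (assumed to live in include/net/sock.h,
 * outside this diffstat): release whichever lock lock_sock_fast() took.
 */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
        if (slow)
                release_sock(sk);       /* slow path: owned = 1, BH already re-enabled */
        else
                spin_unlock_bh(&sk->sk_lock.slock);     /* fast path: drop slock, re-enable BH */
}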