| author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-05-26 15:20:18 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2010-05-27 03:30:53 -0400 |
| commit | 8a74ad60a546b13bd1096b2a61a7a5c6fd9ae17c | |
| tree | 3110e7e59883597b5d0f617e8507e15b8f965f3f /net/ipv4/udp.c | |
| parent | a56635a56f2afb3d22d9ce07e8f8d69537416b2d | |
net: fix lock_sock_bh/unlock_sock_bh
This new sock lock primitive was introduced to speed up some user-context
socket manipulation. But it is unsafe for synchronizing two threads when
one uses the regular lock_sock()/release_sock() pair and the other uses
lock_sock_bh()/unlock_sock_bh().
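To see why, recall that a process-context lock_sock() marks the socket as
owned and then releases the underlying spinlock, while the old fast
primitive only took the spinlock. A minimal sketch of the pre-patch
behaviour (simplified from include/net/sock.h; lockdep and might_sleep()
details omitted):

```c
/* Pre-patch primitive, simplified: it only takes the spinlock
 * and never looks at sk->sk_lock.owned.
 */
static inline void lock_sock_bh(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
}

/*
 * A process-context locker, however, conceptually does:
 *
 *	spin_lock_bh(&sk->sk_lock.slock);
 *	sk->sk_lock.owned = 1;
 *	spin_unlock_bh(&sk->sk_lock.slock);
 *	...  owns the socket WITHOUT holding the spinlock  ...
 *
 * so a concurrent lock_sock_bh() caller acquires the spinlock and
 * enters its critical section while the socket is still owned.
 */
```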
This patch changes lock_sock_bh() to check the socket's 'owned' state:
if owned is found to be set, we must take the slow path.
lock_sock_bh() now returns a boolean saying whether the slow path was
taken, and that boolean is passed to unlock_sock_bh() so it can call the
appropriate unlock function.
After this change, BHs may be either disabled or enabled during the
protected section, depending on which path was taken, so the '_bh' suffix
would be misleading. The functions are therefore renamed to
lock_sock_fast()/unlock_sock_fast().
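Concretely, the renamed primitives look roughly like this (condensed from
the patch; the real lock_sock_fast() lives in net/core/sock.c, outside the
diffstat below, and additionally carries might_sleep() and lockdep
annotations):

```c
/* Grab the socket lock cheaply if nobody owns it; otherwise fall
 * back to the regular slow path.  Returns true if the slow path
 * was taken, in which case BHs stay enabled.
 */
bool lock_sock_fast(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/* Fast path: keep the spinlock, BHs stay disabled. */
		return false;

	/* Slow path: wait for the owner, then own the socket ourselves. */
	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	local_bh_enable();
	return true;
}

/* Undo whichever path lock_sock_fast() took. */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}
```

The hunks below show the callers in net/ipv4/udp.c adopting this pattern.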
Reported-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Tested-by: Anton Blanchard <anton@samba.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/udp.c')
| -rw-r--r-- | net/ipv4/udp.c | 14 |
1 file changed, 8 insertions(+), 6 deletions(-)
```diff
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9de6a698f91d..b9d0d409516f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_unlock_bh(&rcvq->lock);
 
 	if (!skb_queue_empty(&list_kill)) {
-		lock_sock_bh(sk);
+		bool slow = lock_sock_fast(sk);
+
 		__skb_queue_purge(&list_kill);
 		sk_mem_reclaim_partial(sk);
-		unlock_sock_bh(sk);
+		unlock_sock_fast(sk, slow);
 	}
 	return res;
 }
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int peeked;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool slow;
 
 	/*
 	 * Check any passed addresses
@@ -1197,10 +1199,10 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags))
 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	if (noblock)
 		return -EAGAIN;
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock_bh(sk);
+	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 }
 
 /*
```
