Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/udp.c	6
-rw-r--r--	net/ipv6/udp.c	6
2 files changed, 7 insertions, 5 deletions
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 383d17359d01..8e42fbbd5761 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -989,7 +989,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		    up->encap_rcv != NULL) {
 			int ret;
 
+			bh_unlock_sock(sk);
 			ret = (*up->encap_rcv)(sk, skb);
+			bh_lock_sock(sk);
 			if (ret <= 0) {
 				UDP_INC_STATS_BH(sock_net(sk),
 						 UDP_MIB_INDATAGRAMS,
@@ -1092,7 +1094,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		if (skb1) {
 			int ret = 0;
 
-			bh_lock_sock_nested(sk);
+			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				ret = udp_queue_rcv_skb(sk, skb1);
 			else
@@ -1194,7 +1196,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
 	if (sk != NULL) {
 		int ret = 0;
-		bh_lock_sock_nested(sk);
+		bh_lock_sock(sk);
 		if (!sock_owned_by_user(sk))
 			ret = udp_queue_rcv_skb(sk, skb);
 		else
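The first net/ipv4/udp.c hunk drops the socket lock around the encap_rcv callback and re-acquires it afterwards, so the callback can feed packets back into the UDP receive path without holding the lock. Below is a minimal userspace sketch of that unlock/call/relock pattern, using a pthread mutex in place of the BH socket lock; fake_sock, handle_packet and encap_cb are hypothetical names for illustration, not kernel API.

/*
 * Userspace analogue of the unlock/call/relock change above: the
 * encapsulation callback may itself re-inject a decapsulated packet
 * into the receive path, so it must not run with the per-socket
 * lock held.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_sock {
	pthread_mutex_t lock;	/* stands in for the BH socket lock */
	int (*encap_cb)(struct fake_sock *sk, const char *pkt);
};

static int handle_packet(struct fake_sock *sk, const char *pkt)
{
	int ret = 1;	/* >0 means: fall through to normal delivery */

	pthread_mutex_lock(&sk->lock);
	if (sk->encap_cb) {
		/*
		 * Drop the lock across the callback so a re-entrant call
		 * cannot deadlock on it, then re-take it before touching
		 * socket state again.
		 */
		pthread_mutex_unlock(&sk->lock);
		ret = sk->encap_cb(sk, pkt);
		pthread_mutex_lock(&sk->lock);
		if (ret <= 0) {	/* consumed by the encap handler */
			pthread_mutex_unlock(&sk->lock);
			return -ret;
		}
	}
	printf("normal delivery of \"%s\"\n", pkt);
	pthread_mutex_unlock(&sk->lock);
	return 0;
}

static int encap_cb(struct fake_sock *sk, const char *pkt)
{
	if (pkt[0] != 'E')	/* not encapsulated: hand it back */
		return 1;
	handle_packet(sk, pkt + 1);	/* re-enter with the inner payload */
	return 0;	/* 0: handled here */
}

int main(void)
{
	struct fake_sock sk = { PTHREAD_MUTEX_INITIALIZER, encap_cb };

	return handle_packet(&sk, "Einner");
}

In the sketch, keeping the non-recursive mutex held across encap_cb() would deadlock on the re-entrant call; the hunk above presumably avoids the analogous recursive-locking situation when the encapsulation handler hands packets back to UDP.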
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d1477b350f76..a6aecf76a71b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -379,7 +379,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 					uh->source, saddr, dif))) {
 		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
 		if (buff) {
-			bh_lock_sock_nested(sk2);
+			bh_lock_sock(sk2);
 			if (!sock_owned_by_user(sk2))
 				udpv6_queue_rcv_skb(sk2, buff);
 			else
@@ -387,7 +387,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 			bh_unlock_sock(sk2);
 		}
 	}
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else
@@ -508,7 +508,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
 	/* deliver */
 
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else
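The remaining hunks all replace bh_lock_sock_nested() with plain bh_lock_sock() around the same receive-time pattern: take the BH socket lock, deliver immediately if no user-context thread owns the socket, otherwise park the packet on the socket backlog. A small userspace sketch of that pattern follows; toy_sock, rcv, queue_rcv and release_sock_toy are hypothetical stand-ins for the socket, the __udp4_lib_rcv()/__udp6_lib_rcv() callers, the queueing function and release_sock(), not kernel API.

/*
 * Userspace sketch of the "deliver now or backlog" pattern: the BH-side
 * caller takes the socket lock, checks whether a user-context thread
 * currently owns the socket, and either processes the packet immediately
 * or parks it on a backlog drained when the owner releases the socket.
 */
#include <pthread.h>
#include <stdio.h>

#define BACKLOG_MAX 16

struct toy_sock {
	pthread_mutex_t lock;	/* stands in for bh_lock_sock() */
	int owned_by_user;	/* stands in for sock_owned_by_user() */
	const char *backlog[BACKLOG_MAX];
	int backlog_len;
};

static void queue_rcv(struct toy_sock *sk, const char *pkt)
{
	printf("delivered: %s\n", pkt);
}

/* BH-side receive: mirrors the shape of the hunks above. */
static void rcv(struct toy_sock *sk, const char *pkt)
{
	pthread_mutex_lock(&sk->lock);
	if (!sk->owned_by_user)
		queue_rcv(sk, pkt);
	else if (sk->backlog_len < BACKLOG_MAX)
		sk->backlog[sk->backlog_len++] = pkt;	/* sk_add_backlog() analogue */
	pthread_mutex_unlock(&sk->lock);
}

/* User-context release: drains whatever arrived while the socket was owned. */
static void release_sock_toy(struct toy_sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	for (int i = 0; i < sk->backlog_len; i++)
		queue_rcv(sk, sk->backlog[i]);
	sk->backlog_len = 0;
	sk->owned_by_user = 0;
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct toy_sock sk = { PTHREAD_MUTEX_INITIALIZER, 1, {0}, 0 };

	rcv(&sk, "pkt while owned");	/* goes to the backlog */
	release_sock_toy(&sk);		/* owner drains the backlog */
	rcv(&sk, "pkt while free");	/* delivered immediately */
	return 0;
}

With the encapsulation callback now invoked outside the socket lock (first hunk), the receive path presumably no longer takes this lock class recursively, which would be why the lockdep-oriented bh_lock_sock_nested() annotation can revert to plain bh_lock_sock() here.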