diff options
Diffstat (limited to 'net/ipv4/udp.c')
-rw-r--r-- | net/ipv4/udp.c | 69 |
1 file changed, 40 insertions, 29 deletions
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 8e42fbbd5761..c83d0ef469c9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -302,6 +302,13 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | |||
302 | return result; | 302 | return result; |
303 | } | 303 | } |
304 | 304 | ||
305 | struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, | ||
306 | __be32 daddr, __be16 dport, int dif) | ||
307 | { | ||
308 | return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, udp_hash); | ||
309 | } | ||
310 | EXPORT_SYMBOL_GPL(udp4_lib_lookup); | ||
311 | |||
305 | static inline struct sock *udp_v4_mcast_next(struct sock *sk, | 312 | static inline struct sock *udp_v4_mcast_next(struct sock *sk, |
306 | __be16 loc_port, __be32 loc_addr, | 313 | __be16 loc_port, __be32 loc_addr, |
307 | __be16 rmt_port, __be32 rmt_addr, | 314 | __be16 rmt_port, __be32 rmt_addr, |
@@ -951,6 +958,27 @@ int udp_disconnect(struct sock *sk, int flags) | |||
951 | return 0; | 958 | return 0; |
952 | } | 959 | } |
953 | 960 | ||
961 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | ||
962 | { | ||
963 | int is_udplite = IS_UDPLITE(sk); | ||
964 | int rc; | ||
965 | |||
966 | if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) { | ||
967 | /* Note that an ENOMEM error is charged twice */ | ||
968 | if (rc == -ENOMEM) | ||
969 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, | ||
970 | is_udplite); | ||
971 | goto drop; | ||
972 | } | ||
973 | |||
974 | return 0; | ||
975 | |||
976 | drop: | ||
977 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | ||
978 | kfree_skb(skb); | ||
979 | return -1; | ||
980 | } | ||
981 | |||
954 | /* returns: | 982 | /* returns: |
955 | * -1: error | 983 | * -1: error |
956 | * 0: success | 984 | * 0: success |
@@ -989,9 +1017,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
989 | up->encap_rcv != NULL) { | 1017 | up->encap_rcv != NULL) { |
990 | int ret; | 1018 | int ret; |
991 | 1019 | ||
992 | bh_unlock_sock(sk); | ||
993 | ret = (*up->encap_rcv)(sk, skb); | 1020 | ret = (*up->encap_rcv)(sk, skb); |
994 | bh_lock_sock(sk); | ||
995 | if (ret <= 0) { | 1021 | if (ret <= 0) { |
996 | UDP_INC_STATS_BH(sock_net(sk), | 1022 | UDP_INC_STATS_BH(sock_net(sk), |
997 | UDP_MIB_INDATAGRAMS, | 1023 | UDP_MIB_INDATAGRAMS, |
@@ -1044,17 +1070,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
1044 | goto drop; | 1070 | goto drop; |
1045 | } | 1071 | } |
1046 | 1072 | ||
1047 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { | 1073 | rc = 0; |
1048 | /* Note that an ENOMEM error is charged twice */ | ||
1049 | if (rc == -ENOMEM) { | ||
1050 | UDP_INC_STATS_BH(sock_net(sk), | ||
1051 | UDP_MIB_RCVBUFERRORS, is_udplite); | ||
1052 | atomic_inc(&sk->sk_drops); | ||
1053 | } | ||
1054 | goto drop; | ||
1055 | } | ||
1056 | 1074 | ||
1057 | return 0; | 1075 | bh_lock_sock(sk); |
1076 | if (!sock_owned_by_user(sk)) | ||
1077 | rc = __udp_queue_rcv_skb(sk, skb); | ||
1078 | else | ||
1079 | sk_add_backlog(sk, skb); | ||
1080 | bh_unlock_sock(sk); | ||
1081 | |||
1082 | return rc; | ||
1058 | 1083 | ||
1059 | drop: | 1084 | drop: |
1060 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | 1085 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
@@ -1092,15 +1117,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |||
1092 | skb1 = skb_clone(skb, GFP_ATOMIC); | 1117 | skb1 = skb_clone(skb, GFP_ATOMIC); |
1093 | 1118 | ||
1094 | if (skb1) { | 1119 | if (skb1) { |
1095 | int ret = 0; | 1120 | int ret = udp_queue_rcv_skb(sk, skb1); |
1096 | |||
1097 | bh_lock_sock(sk); | ||
1098 | if (!sock_owned_by_user(sk)) | ||
1099 | ret = udp_queue_rcv_skb(sk, skb1); | ||
1100 | else | ||
1101 | sk_add_backlog(sk, skb1); | ||
1102 | bh_unlock_sock(sk); | ||
1103 | |||
1104 | if (ret > 0) | 1121 | if (ret > 0) |
1105 | /* we should probably re-process instead | 1122 | /* we should probably re-process instead |
1106 | * of dropping packets here. */ | 1123 | * of dropping packets here. */ |
@@ -1195,13 +1212,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
1195 | uh->dest, inet_iif(skb), udptable); | 1212 | uh->dest, inet_iif(skb), udptable); |
1196 | 1213 | ||
1197 | if (sk != NULL) { | 1214 | if (sk != NULL) { |
1198 | int ret = 0; | 1215 | int ret = udp_queue_rcv_skb(sk, skb); |
1199 | bh_lock_sock(sk); | ||
1200 | if (!sock_owned_by_user(sk)) | ||
1201 | ret = udp_queue_rcv_skb(sk, skb); | ||
1202 | else | ||
1203 | sk_add_backlog(sk, skb); | ||
1204 | bh_unlock_sock(sk); | ||
1205 | sock_put(sk); | 1216 | sock_put(sk); |
1206 | 1217 | ||
1207 | /* a return value > 0 means to resubmit the input, but | 1218 | /* a return value > 0 means to resubmit the input, but |
@@ -1494,7 +1505,7 @@ struct proto udp_prot = { | |||
1494 | .sendmsg = udp_sendmsg, | 1505 | .sendmsg = udp_sendmsg, |
1495 | .recvmsg = udp_recvmsg, | 1506 | .recvmsg = udp_recvmsg, |
1496 | .sendpage = udp_sendpage, | 1507 | .sendpage = udp_sendpage, |
1497 | .backlog_rcv = udp_queue_rcv_skb, | 1508 | .backlog_rcv = __udp_queue_rcv_skb, |
1498 | .hash = udp_lib_hash, | 1509 | .hash = udp_lib_hash, |
1499 | .unhash = udp_lib_unhash, | 1510 | .unhash = udp_lib_unhash, |
1500 | .get_port = udp_v4_get_port, | 1511 | .get_port = udp_v4_get_port, |