Diffstat (limited to 'net/ipv4/udp.c')
-rw-r--r--  net/ipv4/udp.c  130
1 files changed, 94 insertions, 36 deletions
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f0126fdd7e04..fb23c2e63b52 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -95,6 +95,7 @@
 #include <linux/mm.h>
 #include <linux/inet.h>
 #include <linux/netdevice.h>
+#include <linux/slab.h>
 #include <net/tcp_states.h>
 #include <linux/skbuff.h>
 #include <linux/proc_fs.h>
@@ -232,7 +233,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 		 */
 		do {
 			if (low <= snum && snum <= high &&
-			    !test_bit(snum >> udptable->log, bitmap))
+			    !test_bit(snum >> udptable->log, bitmap) &&
+			    !inet_is_reserved_local_port(snum))
 				goto found;
 			snum += rand;
 		} while (snum != first);
@@ -306,13 +308,13 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
 static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
 				       unsigned int port)
 {
-	return jhash_1word(saddr, net_hash_mix(net)) ^ port;
+	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
 }
 
 int udp_v4_get_port(struct sock *sk, unsigned short snum)
 {
 	unsigned int hash2_nulladdr =
-		udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum);
+		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
 	unsigned int hash2_partial =
 		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
 
@@ -465,14 +467,14 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 					  daddr, hnum, dif,
 					  hslot2, slot2);
 		if (!result) {
-			hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum);
+			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
 			slot2 = hash2 & udptable->mask;
 			hslot2 = &udptable->hash2[slot2];
 			if (hslot->count < hslot2->count)
 				goto begin;
 
-			result = udp4_lib_lookup2(net, INADDR_ANY, sport,
-						  daddr, hnum, dif,
+			result = udp4_lib_lookup2(net, saddr, sport,
+						  htonl(INADDR_ANY), hnum, dif,
 						  hslot2, slot2);
 		}
 		rcu_read_unlock();
@@ -631,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 	if (!inet->recverr) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 			goto out;
-	} else {
+	} else
 		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-	}
+
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
@@ -912,7 +914,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		    !sock_flag(sk, SOCK_BROADCAST))
 			goto out;
 		if (connected)
-			sk_dst_set(sk, dst_clone(&rt->u.dst));
+			sk_dst_set(sk, dst_clone(&rt->dst));
 	}
 
 	if (msg->msg_flags&MSG_CONFIRM)
@@ -976,7 +978,7 @@ out:
 	return err;
 
 do_confirm:
-	dst_confirm(&rt->u.dst);
+	dst_confirm(&rt->dst);
 	if (!(msg->msg_flags&MSG_PROBE) || len)
 		goto back_from_confirm;
 	err = 0;
@@ -1061,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_unlock_bh(&rcvq->lock);
 
 	if (!skb_queue_empty(&list_kill)) {
-		lock_sock(sk);
+		bool slow = lock_sock_fast(sk);
+
 		__skb_queue_purge(&list_kill);
 		sk_mem_reclaim_partial(sk);
-		release_sock(sk);
+		unlock_sock_fast(sk, slow);
 	}
 	return res;
 }
@@ -1117,10 +1120,11 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
 	struct sk_buff *skb;
-	unsigned int ulen, copied;
+	unsigned int ulen;
 	int peeked;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool slow;
 
 	/*
 	 * Check any passed addresses
@@ -1138,10 +1142,9 @@ try_again:
 		goto out;
 
 	ulen = skb->len - sizeof(struct udphdr);
-	copied = len;
-	if (copied > ulen)
-		copied = ulen;
-	else if (copied < ulen)
+	if (len > ulen)
+		len = ulen;
+	else if (len < ulen)
 		msg->msg_flags |= MSG_TRUNC;
 
 	/*
@@ -1150,14 +1153,14 @@ try_again:
 	 * coverage checksum (UDP-Lite), do it before the copy.
 	 */
 
-	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+	if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
 		if (udp_lib_checksum_complete(skb))
 			goto csum_copy_err;
 	}
 
 	if (skb_csum_unnecessary(skb))
 		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-					      msg->msg_iov, copied);
+					      msg->msg_iov, len);
 	else {
 		err = skb_copy_and_csum_datagram_iovec(skb,
 						       sizeof(struct udphdr),
@@ -1186,7 +1189,7 @@ try_again:
 	if (inet->cmsg_flags)
 		ip_cmsg_recv(msg, skb);
 
-	err = copied;
+	err = len;
 	if (flags & MSG_TRUNC)
 		err = ulen;
 
@@ -1196,10 +1199,10 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock(sk);
+	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags))
 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	release_sock(sk);
+	unlock_sock_fast(sk, slow);
 
 	if (noblock)
 		return -EAGAIN;
@@ -1217,6 +1220,7 @@ int udp_disconnect(struct sock *sk, int flags)
 	sk->sk_state = TCP_CLOSE;
 	inet->inet_daddr = 0;
 	inet->inet_dport = 0;
+	sock_rps_save_rxhash(sk, 0);
 	sk->sk_bound_dev_if = 0;
 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
 		inet_reset_saddr(sk);
@@ -1256,10 +1260,57 @@ void udp_lib_unhash(struct sock *sk)
 }
 EXPORT_SYMBOL(udp_lib_unhash);
 
+/*
+ * inet_rcv_saddr was changed, we must rehash secondary hash
+ */
+void udp_lib_rehash(struct sock *sk, u16 newhash)
+{
+	if (sk_hashed(sk)) {
+		struct udp_table *udptable = sk->sk_prot->h.udp_table;
+		struct udp_hslot *hslot, *hslot2, *nhslot2;
+
+		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+		nhslot2 = udp_hashslot2(udptable, newhash);
+		udp_sk(sk)->udp_portaddr_hash = newhash;
+		if (hslot2 != nhslot2) {
+			hslot = udp_hashslot(udptable, sock_net(sk),
+					     udp_sk(sk)->udp_port_hash);
+			/* we must lock primary chain too */
+			spin_lock_bh(&hslot->lock);
+
+			spin_lock(&hslot2->lock);
+			hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+			hslot2->count--;
+			spin_unlock(&hslot2->lock);
+
+			spin_lock(&nhslot2->lock);
+			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+						 &nhslot2->head);
+			nhslot2->count++;
+			spin_unlock(&nhslot2->lock);
+
+			spin_unlock_bh(&hslot->lock);
+		}
+	}
+}
+EXPORT_SYMBOL(udp_lib_rehash);
+
+static void udp_v4_rehash(struct sock *sk)
+{
+	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+					  inet_sk(sk)->inet_rcv_saddr,
+					  inet_sk(sk)->inet_num);
+	udp_lib_rehash(sk, new_hash);
+}
+
 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int rc = sock_queue_rcv_skb(sk, skb);
+	int rc;
 
+	if (inet_sk(sk)->inet_daddr)
+		sock_rps_save_rxhash(sk, skb->rxhash);
+
+	rc = ip_queue_rcv_skb(sk, skb);
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);
 
@@ -1367,13 +1418,19 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 	}
 
+
+	if (sk_rcvqueues_full(sk, skb))
+		goto drop;
+
 	rc = 0;
 
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
+	else if (sk_add_backlog(sk, skb)) {
+		bh_unlock_sock(sk);
+		goto drop;
+	}
 	bh_unlock_sock(sk);
 
 	return rc;
@@ -1525,6 +1582,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
 	uh   = udp_hdr(skb);
 	ulen = ntohs(uh->len);
+	saddr = ip_hdr(skb)->saddr;
+	daddr = ip_hdr(skb)->daddr;
+
 	if (ulen > skb->len)
 		goto short_packet;
 
@@ -1538,9 +1598,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (udp4_csum_init(skb, uh, proto))
 		goto csum_error;
 
-	saddr = ip_hdr(skb)->saddr;
-	daddr = ip_hdr(skb)->daddr;
-
 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
 		return __udp4_lib_mcast_deliver(net, skb, uh,
 				saddr, daddr, udptable);
@@ -1613,9 +1670,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock(sk);
+	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
-	release_sock(sk);
+	unlock_sock_fast(sk, slow);
 }
 
 /*
@@ -1674,8 +1731,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
 			val = 8;
-		else if (val > USHORT_MAX)
-			val = USHORT_MAX;
+		else if (val > USHRT_MAX)
+			val = USHRT_MAX;
 		up->pcslen = val;
 		up->pcflag |= UDPLITE_SEND_CC;
 		break;
@@ -1688,8 +1745,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Avoid silly minimal values. */
 			val = 8;
-		else if (val > USHORT_MAX)
-			val = USHORT_MAX;
+		else if (val > USHRT_MAX)
+			val = USHRT_MAX;
 		up->pcrlen = val;
 		up->pcflag |= UDPLITE_RECV_CC;
 		break;
@@ -1829,6 +1886,7 @@ struct proto udp_prot = {
 	.backlog_rcv	   = __udp_queue_rcv_skb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
+	.rehash		   = udp_v4_rehash,
 	.get_port	   = udp_v4_get_port,
 	.memory_allocated  = &udp_memory_allocated,
 	.sysctl_mem	   = sysctl_udp_mem,
@@ -2027,12 +2085,12 @@ static struct udp_seq_afinfo udp4_seq_afinfo = {
 	},
 };
 
-static int udp4_proc_init_net(struct net *net)
+static int __net_init udp4_proc_init_net(struct net *net)
 {
 	return udp_proc_register(net, &udp4_seq_afinfo);
 }
 
-static void udp4_proc_exit_net(struct net *net)
+static void __net_exit udp4_proc_exit_net(struct net *net)
 {
 	udp_proc_unregister(net, &udp4_seq_afinfo);
 }
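
Note on the locking change above: first_packet_length(), udp_recvmsg() and udp_destroy_sock() replace their lock_sock()/release_sock() pairs with lock_sock_fast()/unlock_sock_fast(). The following is a minimal sketch of how that pair is meant to be used, assuming only what is visible in this diff (lock_sock_fast() returns a flag recording whether the slow, fully-locked path was taken, and that flag must be handed back to unlock_sock_fast()); the helper name is hypothetical and is not part of the patch.

	#include <net/sock.h>
	#include <linux/skbuff.h>

	/* Hypothetical helper, for illustration only: purge a queue while
	 * holding the socket lock via the fast-lock API used in this patch.
	 */
	static void example_purge_queue(struct sock *sk, struct sk_buff_head *q)
	{
		bool slow = lock_sock_fast(sk);	/* spinlock-only fast path when possible */

		__skb_queue_purge(q);		/* work done while the socket is locked */
		sk_mem_reclaim_partial(sk);

		unlock_sock_fast(sk, slow);	/* must receive the flag returned above */
	}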