author		Patrick McHardy <kaber@trash.net>	2010-05-10 12:39:28 -0400
committer	Patrick McHardy <kaber@trash.net>	2010-05-10 12:39:28 -0400
commit		1e4b1057121bc756b91758a434b504d2010f6088 (patch)
tree		b016cf2c728289c7e36d9e4e488f30ab0bd0ae6e /net/ipv4
parent		3b254c54ec46eb022cb26ee6ab37fae23f5f7d6a (diff)
parent		3ee943728fff536edaf8f59faa58aaa1aa7366e3 (diff)

Merge branch 'master' of /repos/git/net-next-2.6

Conflicts:
	net/bridge/br_device.c
	net/bridge/br_forward.c

Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/af_inet.c                22
-rw-r--r--   net/ipv4/fib_rules.c               4
-rw-r--r--   net/ipv4/fib_trie.c                4
-rw-r--r--   net/ipv4/inet_connection_sock.c    4
-rw-r--r--   net/ipv4/ip_output.c               2
-rw-r--r--   net/ipv4/ip_sockglue.c            16
-rw-r--r--   net/ipv4/ipmr.c                  110
-rw-r--r--   net/ipv4/raw.c                     2
-rw-r--r--   net/ipv4/route.c                 137
-rw-r--r--   net/ipv4/tcp.c                    19
-rw-r--r--   net/ipv4/tcp_input.c               1
-rw-r--r--   net/ipv4/tcp_ipv4.c                6
-rw-r--r--   net/ipv4/tcp_output.c              9
-rw-r--r--   net/ipv4/tcp_timer.c               4
-rw-r--r--   net/ipv4/udp.c                    30
15 files changed, 192 insertions(+), 178 deletions(-)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c5376c725503..c6c43bcd1c6f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -419,7 +419,7 @@ int inet_release(struct socket *sock)
 	if (sk) {
 		long timeout;
 
-		inet_rps_reset_flow(sk);
+		sock_rps_reset_flow(sk);
 
 		/* Applications forget to leave groups before exiting */
 		ip_mc_drop_socket(sk);
@@ -548,7 +548,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
 {
 	DEFINE_WAIT(wait);
 
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 	/* Basic assumption: if someone sets sk->sk_err, he _must_
 	 * change state of the socket from TCP_SYN_*.
@@ -561,9 +561,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
 		lock_sock(sk);
 		if (signal_pending(current) || !timeo)
 			break;
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return timeo;
 }
 
@@ -722,7 +722,7 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 
-	inet_rps_record_flow(sk);
+	sock_rps_record_flow(sk);
 
 	/* We may need to bind the socket. */
 	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
@@ -737,7 +737,7 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
 {
 	struct sock *sk = sock->sk;
 
-	inet_rps_record_flow(sk);
+	sock_rps_record_flow(sk);
 
 	/* We may need to bind the socket. */
 	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
@@ -755,7 +755,7 @@ int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	int addr_len = 0;
 	int err;
 
-	inet_rps_record_flow(sk);
+	sock_rps_record_flow(sk);
 
 	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
 				   flags & ~MSG_DONTWAIT, &addr_len);
@@ -1323,8 +1323,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
 		goto out_unlock;
 
-	id = ntohl(*(u32 *)&iph->id);
-	flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
+	id = ntohl(*(__be32 *)&iph->id);
+	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
 	id >>= 16;
 
 	for (p = *head; p; p = p->next) {
@@ -1337,8 +1337,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 
 		if ((iph->protocol ^ iph2->protocol) |
 		    (iph->tos ^ iph2->tos) |
-		    (iph->saddr ^ iph2->saddr) |
-		    (iph->daddr ^ iph2->daddr)) {
+		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
+		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
 			NAPI_GRO_CB(p)->same_flow = 0;
 			continue;
 		}
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 3ec84fea5b71..76daeb5ff564 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -245,8 +245,8 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
 	rt_cache_flush(ops->fro_net, -1);
 }
 
-static struct fib_rules_ops fib4_rules_ops_template = {
-	.family = FIB_RULES_IPV4,
+static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
+	.family = AF_INET,
 	.rule_size = sizeof(struct fib4_rule),
 	.addr_size = sizeof(u32),
 	.action = fib4_rule_action,
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 59a838795e3e..c98f115fb0fd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -209,7 +209,9 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
 {
 	struct node *ret = tnode_get_child(tn, i);
 
-	return rcu_dereference(ret);
+	return rcu_dereference_check(ret,
+				     rcu_read_lock_held() ||
+				     lockdep_rtnl_is_held());
 }
 
 static inline int tnode_child_length(const struct tnode *tn)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8da6429269dd..e0a3e3537b14 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -234,7 +234,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 	 * having to remove and re-insert us on the wait queue.
 	 */
 	for (;;) {
-		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
 					  TASK_INTERRUPTIBLE);
 		release_sock(sk);
 		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
@@ -253,7 +253,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 		if (!timeo)
 			break;
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return err;
 }
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d979710684b2..252897443ef9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -120,7 +120,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 	newskb->pkt_type = PACKET_LOOPBACK;
 	newskb->ip_summed = CHECKSUM_UNNECESSARY;
 	WARN_ON(!skb_dst(newskb));
-	netif_rx(newskb);
+	netif_rx_ni(newskb);
 	return 0;
 }
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index b0aa0546a3b3..ce231780a2b1 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -954,6 +954,22 @@ e_inval:
 	return -EINVAL;
 }
 
+/**
+ * ip_queue_rcv_skb - Queue an skb into sock receive queue
+ * @sk: socket
+ * @skb: buffer
+ *
+ * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option
+ * is not set, we drop skb dst entry now, while dst cache line is hot.
+ */
+int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
+		skb_dst_drop(skb);
+	return sock_queue_rcv_skb(sk, skb);
+}
+EXPORT_SYMBOL(ip_queue_rcv_skb);
+
 int ip_setsockopt(struct sock *sk, int level,
 		  int optname, char __user *optval, unsigned int optlen)
 {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 1aa498d7a0a5..f3f1c6b5c70c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -128,8 +128,8 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
 			 int local);
 static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
-static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
-			    struct mfc_cache *c, struct rtmsg *rtm);
+static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			      struct mfc_cache *c, struct rtmsg *rtm);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -216,8 +216,8 @@ static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
 	return 0;
 }
 
-static struct fib_rules_ops ipmr_rules_ops_template = {
-	.family = FIB_RULES_IPMR,
+static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
+	.family = RTNL_FAMILY_IPMR,
 	.rule_size = sizeof(struct ipmr_rule),
 	.addr_size = sizeof(u32),
 	.action = ipmr_rule_action,
@@ -831,7 +831,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 	if (ip_hdr(skb)->version == 0) {
 		struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
-		if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
+		if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
 			nlh->nlmsg_len = (skb_tail_pointer(skb) -
 					  (u8 *)nlh);
 		} else {
@@ -1772,10 +1772,10 @@ int ip_mr_input(struct sk_buff *skb)
 
 		vif = ipmr_find_vif(mrt, skb->dev);
 		if (vif >= 0) {
-			int err = ipmr_cache_unresolved(mrt, vif, skb);
+			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
 			read_unlock(&mrt_lock);
 
-			return err;
+			return err2;
 		}
 		read_unlock(&mrt_lock);
 		kfree_skb(skb);
@@ -1904,9 +1904,8 @@ drop:
 }
 #endif
 
-static int
-ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c,
-		 struct rtmsg *rtm)
+static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			      struct mfc_cache *c, struct rtmsg *rtm)
 {
 	int ct;
 	struct rtnexthop *nhp;
@@ -1994,11 +1993,93 @@ int ipmr_get_route(struct net *net,
 
 	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
 		cache->mfc_flags |= MFC_NOTIFY;
-	err = ipmr_fill_mroute(mrt, skb, cache, rtm);
+	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
 	read_unlock(&mrt_lock);
 	return err;
 }
 
+static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			    u32 pid, u32 seq, struct mfc_cache *c)
+{
+	struct nlmsghdr *nlh;
+	struct rtmsg *rtm;
+
+	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	rtm = nlmsg_data(nlh);
+	rtm->rtm_family = RTNL_FAMILY_IPMR;
+	rtm->rtm_dst_len = 32;
+	rtm->rtm_src_len = 32;
+	rtm->rtm_tos = 0;
+	rtm->rtm_table = mrt->id;
+	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+	rtm->rtm_type = RTN_MULTICAST;
+	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+	rtm->rtm_protocol = RTPROT_UNSPEC;
+	rtm->rtm_flags = 0;
+
+	NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
+	NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);
+
+	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
+		goto nla_put_failure;
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct net *net = sock_net(skb->sk);
+	struct mr_table *mrt;
+	struct mfc_cache *mfc;
+	unsigned int t = 0, s_t;
+	unsigned int h = 0, s_h;
+	unsigned int e = 0, s_e;
+
+	s_t = cb->args[0];
+	s_h = cb->args[1];
+	s_e = cb->args[2];
+
+	read_lock(&mrt_lock);
+	ipmr_for_each_table(mrt, net) {
+		if (t < s_t)
+			goto next_table;
+		if (t > s_t)
+			s_h = 0;
+		for (h = s_h; h < MFC_LINES; h++) {
+			list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) {
+				if (e < s_e)
+					goto next_entry;
+				if (ipmr_fill_mroute(mrt, skb,
+						     NETLINK_CB(cb->skb).pid,
+						     cb->nlh->nlmsg_seq,
+						     mfc) < 0)
+					goto done;
+next_entry:
+				e++;
+			}
+			e = s_e = 0;
+		}
+		s_h = 0;
+next_table:
+		t++;
+	}
+done:
+	read_unlock(&mrt_lock);
+
+	cb->args[2] = e;
+	cb->args[1] = h;
+	cb->args[0] = t;
+
+	return skb->len;
+}
+
 #ifdef CONFIG_PROC_FS
 /*
  *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
@@ -2227,9 +2308,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 	const struct ipmr_mfc_iter *it = seq->private;
 	const struct mr_table *mrt = it->mrt;
 
-	seq_printf(seq, "%08lX %08lX %-3hd",
-		   (unsigned long) mfc->mfc_mcastgrp,
-		   (unsigned long) mfc->mfc_origin,
+	seq_printf(seq, "%08X %08X %-3hd",
+		   (__force u32) mfc->mfc_mcastgrp,
+		   (__force u32) mfc->mfc_origin,
 		   mfc->mfc_parent);
 
 	if (it->cache != &mrt->mfc_unres_queue) {
@@ -2355,6 +2436,7 @@ int __init ip_mr_init(void)
 		goto add_proto_fail;
 	}
 #endif
+	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute);
 	return 0;
 
 #ifdef CONFIG_IP_PIMSM_V2
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bbda0d5f9244..2c7a1639388a 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -290,7 +290,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
 {
 	/* Charge it to the socket. */
 
-	if (sock_queue_rcv_skb(sk, skb) < 0) {
+	if (ip_queue_rcv_skb(sk, skb) < 0) {
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cb562fdd9b9a..dea3f9264250 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,7 +129,6 @@ static int ip_rt_gc_elasticity __read_mostly = 8;
 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly = 256;
-static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
 static int rt_chain_length_max __read_mostly = 20;
 
 static struct delayed_work expires_work;
@@ -258,10 +257,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 	(__raw_get_cpu_var(rt_cache_stat).field++)
 
 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
 				   int genid)
 {
-	return jhash_3words((__force u32)(__be32)(daddr),
-			    (__force u32)(__be32)(saddr),
+	return jhash_3words((__force u32)daddr, (__force u32)saddr,
 			    idx, genid)
 		& rt_hash_mask;
 }
@@ -378,12 +376,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 	struct rtable *r = v;
 	int len;
 
-	seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
-		   "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
+	seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
+		   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
 		   r->u.dst.dev ? r->u.dst.dev->name : "*",
-		   (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
+		   (__force u32)r->rt_dst,
+		   (__force u32)r->rt_gateway,
 		   r->rt_flags, atomic_read(&r->u.dst.__refcnt),
-		   r->u.dst.__use, 0, (unsigned long)r->rt_src,
+		   r->u.dst.__use, 0, (__force u32)r->rt_src,
 		   (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
 		    (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
 		   dst_metric(&r->u.dst, RTAX_WINDOW),
@@ -685,18 +684,17 @@ static inline bool rt_caching(const struct net *net)
 static inline bool compare_hash_inputs(const struct flowi *fl1,
 				       const struct flowi *fl2)
 {
-	return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
-		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
+	return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
+		((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
 		(fl1->iif ^ fl2->iif)) == 0);
 }
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-	return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
-		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
+	return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
+		((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
 		(fl1->mark ^ fl2->mark) |
-		(*(u16 *)&fl1->nl_u.ip4_u.tos ^
-		 *(u16 *)&fl2->nl_u.ip4_u.tos) |
+		(*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) |
 		(fl1->oif ^ fl2->oif) |
 		(fl1->iif ^ fl2->iif)) == 0;
 }
@@ -919,32 +917,11 @@ void rt_cache_flush_batch(void)
 	rt_do_flush(!in_softirq());
 }
 
-/*
- * We change rt_genid and let gc do the cleanup
- */
-static void rt_secret_rebuild(unsigned long __net)
-{
-	struct net *net = (struct net *)__net;
-	rt_cache_invalidate(net);
-	mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
-}
-
-static void rt_secret_rebuild_oneshot(struct net *net)
-{
-	del_timer_sync(&net->ipv4.rt_secret_timer);
-	rt_cache_invalidate(net);
-	if (ip_rt_secret_interval)
-		mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
-}
-
 static void rt_emergency_hash_rebuild(struct net *net)
 {
-	if (net_ratelimit()) {
+	if (net_ratelimit())
 		printk(KERN_WARNING "Route hash chain too long!\n");
-		printk(KERN_WARNING "Adjust your secret_interval!\n");
-	}
-
-	rt_secret_rebuild_oneshot(net);
+	rt_cache_invalidate(net);
 }
 
 /*
@@ -2319,8 +2296,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	rcu_read_lock();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
 	     rth = rcu_dereference(rth->u.dst.rt_next)) {
-		if (((rth->fl.fl4_dst ^ daddr) |
-		     (rth->fl.fl4_src ^ saddr) |
+		if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
+		     ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
 		     (rth->fl.iif ^ iif) |
 		     rth->fl.oif |
 		     (rth->fl.fl4_tos ^ tos)) == 0 &&
@@ -3102,48 +3079,6 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
 	return -EINVAL;
 }
 
-static void rt_secret_reschedule(int old)
-{
-	struct net *net;
-	int new = ip_rt_secret_interval;
-	int diff = new - old;
-
-	if (!diff)
-		return;
-
-	rtnl_lock();
-	for_each_net(net) {
-		int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
-		long time;
-
-		if (!new)
-			continue;
-
-		if (deleted) {
-			time = net->ipv4.rt_secret_timer.expires - jiffies;
-
-			if (time <= 0 || (time += diff) <= 0)
-				time = 0;
-		} else
-			time = new;
-
-		mod_timer(&net->ipv4.rt_secret_timer, jiffies + time);
-	}
-	rtnl_unlock();
-}
-
-static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
-					  void __user *buffer, size_t *lenp,
-					  loff_t *ppos)
-{
-	int old = ip_rt_secret_interval;
-	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
-
-	rt_secret_reschedule(old);
-
-	return ret;
-}
-
 static ctl_table ipv4_route_table[] = {
 	{
 		.procname = "gc_thresh",
@@ -3252,13 +3187,6 @@ static ctl_table ipv4_route_table[] = {
 		.mode = 0644,
 		.proc_handler = proc_dointvec,
 	},
-	{
-		.procname = "secret_interval",
-		.data = &ip_rt_secret_interval,
-		.maxlen = sizeof(int),
-		.mode = 0644,
-		.proc_handler = ipv4_sysctl_rt_secret_interval,
-	},
 	{ }
 };
 
@@ -3337,34 +3265,15 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
 };
 #endif
 
-
-static __net_init int rt_secret_timer_init(struct net *net)
+static __net_init int rt_genid_init(struct net *net)
 {
-	atomic_set(&net->ipv4.rt_genid,
-		   (int) ((num_physpages ^ (num_physpages>>8)) ^
-		   (jiffies ^ (jiffies >> 7))));
-
-	net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
-	net->ipv4.rt_secret_timer.data = (unsigned long)net;
-	init_timer_deferrable(&net->ipv4.rt_secret_timer);
-
-	if (ip_rt_secret_interval) {
-		net->ipv4.rt_secret_timer.expires =
-			jiffies + net_random() % ip_rt_secret_interval +
-			ip_rt_secret_interval;
-		add_timer(&net->ipv4.rt_secret_timer);
-	}
+	get_random_bytes(&net->ipv4.rt_genid,
+			 sizeof(net->ipv4.rt_genid));
 	return 0;
 }
 
-static __net_exit void rt_secret_timer_exit(struct net *net)
-{
-	del_timer_sync(&net->ipv4.rt_secret_timer);
-}
-
-static __net_initdata struct pernet_operations rt_secret_timer_ops = {
-	.init = rt_secret_timer_init,
-	.exit = rt_secret_timer_exit,
+static __net_initdata struct pernet_operations rt_genid_ops = {
+	.init = rt_genid_init,
 };
 
 
@@ -3425,9 +3334,6 @@ int __init ip_rt_init(void)
 	schedule_delayed_work(&expires_work,
 		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
 
-	if (register_pernet_subsys(&rt_secret_timer_ops))
-		printk(KERN_ERR "Unable to setup rt_secret_timer\n");
-
 	if (ip_rt_proc_init())
 		printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
@@ -3439,6 +3345,7 @@ int __init ip_rt_init(void)
 #ifdef CONFIG_SYSCTL
 	register_pernet_subsys(&sysctl_route_ops);
 #endif
+	register_pernet_subsys(&rt_genid_ops);
 	return rc;
 }
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0f8caf64caa3..8ce29747ad9b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -378,7 +378,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sock *sk = sock->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	if (sk->sk_state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
@@ -2298,7 +2298,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		if (sock_flag(sk, SOCK_KEEPOPEN) &&
 		    !((1 << sk->sk_state) &
 		      (TCPF_CLOSE | TCPF_LISTEN))) {
-			__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
+			u32 elapsed = keepalive_time_elapsed(tp);
 			if (tp->keepalive_time > elapsed)
 				elapsed = tp->keepalive_time - elapsed;
 			else
@@ -2721,7 +2721,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	struct tcphdr *th2;
 	unsigned int len;
 	unsigned int thlen;
-	unsigned int flags;
+	__be32 flags;
 	unsigned int mss = 1;
 	unsigned int hlen;
 	unsigned int off;
@@ -2771,10 +2771,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 found:
 	flush = NAPI_GRO_CB(p)->flush;
-	flush |= flags & TCP_FLAG_CWR;
-	flush |= (flags ^ tcp_flag_word(th2)) &
-		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
-	flush |= th->ack_seq ^ th2->ack_seq;
+	flush |= (__force int)(flags & TCP_FLAG_CWR);
+	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
+		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
+	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
 	for (i = sizeof(*th); i < thlen; i += 4)
 		flush |= *(u32 *)((u8 *)th + i) ^
 			 *(u32 *)((u8 *)th2 + i);
@@ -2795,8 +2795,9 @@ found:
 
 out_check_final:
 	flush = len < mss;
-	flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
-			  TCP_FLAG_SYN | TCP_FLAG_FIN);
+	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
+					TCP_FLAG_RST | TCP_FLAG_SYN |
+					TCP_FLAG_FIN));
 
 	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
 		pp = head;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ae3ec15fb630..e82162c211bf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4367,6 +4367,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
 			goto drop;
 
+		skb_dst_drop(skb);
 		__skb_pull(skb, th->doff * 4);
 
 		TCP_ECN_accept_cwr(tp, skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad08392a738c..771f8146a2e5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1286,8 +1286,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			goto drop_and_release;
 
 		/* Secret recipe starts with IP addresses */
-		*mess++ ^= daddr;
-		*mess++ ^= saddr;
+		*mess++ ^= (__force u32)daddr;
+		*mess++ ^= (__force u32)saddr;
 
 		/* plus variable length Initiator Cookie */
 		c = (u8 *)mess;
@@ -1672,7 +1672,7 @@ process:
 
 	skb->dev = NULL;
 
-	inet_rps_save_rxhash(sk, skb->rxhash);
+	sock_rps_save_rxhash(sk, skb->rxhash);
 
 	bh_lock_sock_nested(sk);
 	ret = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2b7d71fb8439..5db3a2c6cb33 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -861,7 +861,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
 			th->urg = 1;
 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
-			th->urg_ptr = 0xFFFF;
+			th->urg_ptr = htons(0xFFFF);
 			th->urg = 1;
 		}
 	}
@@ -888,7 +888,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		tcp_event_data_sent(tp, skb, sk);
 
 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
-		TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
+			      tcp_skb_pcount(skb));
 
 	err = icsk->icsk_af_ops->queue_xmit(skb);
 	if (likely(err <= 0))
@@ -2485,7 +2486,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 		*tail-- ^= TCP_SKB_CB(skb)->seq + 1;
 
 		/* recommended */
-		*tail-- ^= ((th->dest << 16) | th->source);
+		*tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
 		*tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
 
 		sha_transform((__u32 *)&xvp->cookie_bakery[0],
@@ -2503,7 +2504,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	th->window = htons(min(req->rcv_wnd, 65535U));
 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
 	th->doff = (tcp_header_size >> 2);
-	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+	TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Okay, we have all we need - do the md5 hash if needed */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c732be00606b..440a5c6004f6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -517,7 +517,7 @@ static void tcp_keepalive_timer (unsigned long data)
 	struct sock *sk = (struct sock *) data;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	__u32 elapsed;
+	u32 elapsed;
 
 	/* Only process if socket is not in use. */
 	bh_lock_sock(sk);
@@ -554,7 +554,7 @@ static void tcp_keepalive_timer (unsigned long data)
 	if (tp->packets_out || tcp_send_head(sk))
 		goto resched;
 
-	elapsed = tcp_time_stamp - tp->rcv_tstamp;
+	elapsed = keepalive_time_elapsed(tp);
 
 	if (elapsed >= keepalive_time_when(tp)) {
 		if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 666b963496ff..4560b291180b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -307,13 +307,13 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
 static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
 				       unsigned int port)
 {
-	return jhash_1word(saddr, net_hash_mix(net)) ^ port;
+	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
 }
 
 int udp_v4_get_port(struct sock *sk, unsigned short snum)
 {
 	unsigned int hash2_nulladdr =
-		udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum);
+		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
 	unsigned int hash2_partial =
 		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
 
@@ -466,14 +466,14 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 					  daddr, hnum, dif,
 					  hslot2, slot2);
 		if (!result) {
-			hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum);
+			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
 			slot2 = hash2 & udptable->mask;
 			hslot2 = &udptable->hash2[slot2];
 			if (hslot->count < hslot2->count)
 				goto begin;
 
 			result = udp4_lib_lookup2(net, saddr, sport,
-						  INADDR_ANY, hnum, dif,
+						  htonl(INADDR_ANY), hnum, dif,
 						  hslot2, slot2);
 		}
 		rcu_read_unlock();
@@ -1062,10 +1062,10 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_unlock_bh(&rcvq->lock);
 
 	if (!skb_queue_empty(&list_kill)) {
-		lock_sock(sk);
+		lock_sock_bh(sk);
 		__skb_queue_purge(&list_kill);
 		sk_mem_reclaim_partial(sk);
-		release_sock(sk);
+		unlock_sock_bh(sk);
 	}
 	return res;
 }
@@ -1196,10 +1196,10 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock(sk);
+	lock_sock_bh(sk);
 	if (!skb_kill_datagram(sk, skb, flags))
 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	release_sock(sk);
+	unlock_sock_bh(sk);
 
 	if (noblock)
 		return -EAGAIN;
@@ -1217,7 +1217,7 @@ int udp_disconnect(struct sock *sk, int flags)
 	sk->sk_state = TCP_CLOSE;
 	inet->inet_daddr = 0;
 	inet->inet_dport = 0;
-	inet_rps_save_rxhash(sk, 0);
+	sock_rps_save_rxhash(sk, 0);
 	sk->sk_bound_dev_if = 0;
 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
 		inet_reset_saddr(sk);
@@ -1262,9 +1262,9 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	int rc;
 
 	if (inet_sk(sk)->inet_daddr)
-		inet_rps_save_rxhash(sk, skb->rxhash);
+		sock_rps_save_rxhash(sk, skb->rxhash);
 
-	rc = sock_queue_rcv_skb(sk, skb);
+	rc = ip_queue_rcv_skb(sk, skb);
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);
 
@@ -1372,6 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			goto drop;
 	}
 
+
+	if (sk_rcvqueues_full(sk, skb))
+		goto drop;
+
 	rc = 0;
 
 	bh_lock_sock(sk);
@@ -1620,9 +1624,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock(sk);
+	lock_sock_bh(sk);
 	udp_flush_pending_frames(sk);
-	release_sock(sk);
+	unlock_sock_bh(sk);
 }
 
 /*