Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/bpfilter/sockopt.c                   58
-rw-r--r--  net/ipv4/devinet.c                             2
-rw-r--r--  net/ipv4/esp4.c                                2
-rw-r--r--  net/ipv4/fib_frontend.c                        4
-rw-r--r--  net/ipv4/fib_trie.c                           15
-rw-r--r--  net/ipv4/fou.c                                12
-rw-r--r--  net/ipv4/gre_demux.c                          17
-rw-r--r--  net/ipv4/inet_diag.c                          10
-rw-r--r--  net/ipv4/inetpeer.c                            1
-rw-r--r--  net/ipv4/ip_gre.c                             66
-rw-r--r--  net/ipv4/ip_input.c                            1
-rw-r--r--  net/ipv4/ip_sockglue.c                        12
-rw-r--r--  net/ipv4/ip_tunnel.c                           8
-rw-r--r--  net/ipv4/ip_vti.c                             50
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c             2
-rw-r--r--  net/ipv4/netfilter/nf_nat_l3proto_ipv4.c       1
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic_main.c    7
-rw-r--r--  net/ipv4/route.c                               7
-rw-r--r--  net/ipv4/tcp.c                                 4
-rw-r--r--  net/ipv4/tcp_ipv4.c                            5
-rw-r--r--  net/ipv4/tcp_output.c                          1
-rw-r--r--  net/ipv4/tcp_timer.c                           2
-rw-r--r--  net/ipv4/udp.c                                24
-rw-r--r--  net/ipv4/udp_impl.h                            1
-rw-r--r--  net/ipv4/udplite.c                             1
25 files changed, 237 insertions(+), 76 deletions(-)
diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c
index 5e04ed25bc0e..1e976bb93d99 100644
--- a/net/ipv4/bpfilter/sockopt.c
+++ b/net/ipv4/bpfilter/sockopt.c
@@ -1,28 +1,54 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/bpfilter.h>
 #include <uapi/linux/bpf.h>
 #include <linux/wait.h>
 #include <linux/kmod.h>
+#include <linux/fs.h>
+#include <linux/file.h>
 
-int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
-				char __user *optval,
-				unsigned int optlen, bool is_set);
-EXPORT_SYMBOL_GPL(bpfilter_process_sockopt);
+struct bpfilter_umh_ops bpfilter_ops;
+EXPORT_SYMBOL_GPL(bpfilter_ops);
+
+static void bpfilter_umh_cleanup(struct umh_info *info)
+{
+	mutex_lock(&bpfilter_ops.lock);
+	bpfilter_ops.stop = true;
+	fput(info->pipe_to_umh);
+	fput(info->pipe_from_umh);
+	info->pid = 0;
+	mutex_unlock(&bpfilter_ops.lock);
+}
 
 static int bpfilter_mbox_request(struct sock *sk, int optname,
 				 char __user *optval,
 				 unsigned int optlen, bool is_set)
 {
-	if (!bpfilter_process_sockopt) {
-		int err = request_module("bpfilter");
+	int err;
+	mutex_lock(&bpfilter_ops.lock);
+	if (!bpfilter_ops.sockopt) {
+		mutex_unlock(&bpfilter_ops.lock);
+		err = request_module("bpfilter");
+		mutex_lock(&bpfilter_ops.lock);
 
 		if (err)
-			return err;
-		if (!bpfilter_process_sockopt)
-			return -ECHILD;
+			goto out;
+		if (!bpfilter_ops.sockopt) {
+			err = -ECHILD;
+			goto out;
+		}
+	}
+	if (bpfilter_ops.stop) {
+		err = bpfilter_ops.start();
+		if (err)
+			goto out;
 	}
-	return bpfilter_process_sockopt(sk, optname, optval, optlen, is_set);
+	err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set);
+out:
+	mutex_unlock(&bpfilter_ops.lock);
+	return err;
 }
 
 int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
@@ -41,3 +67,15 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
 
 	return bpfilter_mbox_request(sk, optname, optval, len, false);
 }
+
+static int __init bpfilter_sockopt_init(void)
+{
+	mutex_init(&bpfilter_ops.lock);
+	bpfilter_ops.stop = true;
+	bpfilter_ops.info.cmdline = "bpfilter_umh";
+	bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup;
+
+	return 0;
+}
+
+module_init(bpfilter_sockopt_init);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 04ba321ae5ce..e258a00b4a3d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1826,7 +1826,7 @@ put_tgt_net:
 	if (fillargs.netnsid >= 0)
 		put_net(tgt_net);
 
-	return err < 0 ? err : skb->len;
+	return skb->len ? : err;
 }
 
 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
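
The devinet.c hunk above uses the GNU C conditional with an omitted middle operand: `x ? : y` evaluates to `x` when `x` is non-zero (evaluating it only once) and to `y` otherwise, which matches the netlink-dump convention of returning the bytes already written and only falling back to the error when nothing was written. A minimal user-space sketch of that convention (illustrative values, not kernel code):

#include <stdio.h>

/* Mimics the netlink dump return convention: report the bytes already
 * written if there are any, otherwise propagate the error code. Uses the
 * GNU extension "a ? : b", equivalent to "a ? a : b" with "a" evaluated once.
 */
static int dump_result(int skb_len, int err)
{
	return skb_len ? : err;
}

int main(void)
{
	printf("%d\n", dump_result(128, -22));  /* 128: partial output wins */
	printf("%d\n", dump_result(0, -22));    /* -22: nothing written, return the error */
	return 0;
}
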
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 5459f41fc26f..10e809b296ec 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 			skb->len += tailen;
 			skb->data_len += tailen;
 			skb->truesize += tailen;
-			if (sk)
+			if (sk && sk_fullsock(sk))
 				refcount_add(tailen, &sk->sk_wmem_alloc);
 
 			goto out;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6df95be96311..fe4f6a624238 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
 		struct fib_table *tb;
 
 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
-			flushed += fib_table_flush(net, tb);
+			flushed += fib_table_flush(net, tb, false);
 	}
 
 	if (flushed)
@@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net)
 
 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
 			hlist_del(&tb->tb_hlist);
-			fib_table_flush(net, tb);
+			fib_table_flush(net, tb, true);
 			fib_free_table(tb);
 		}
 	}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 237c9f72b265..a573e37e0615 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
 }
 
 /* Caller must hold RTNL. */
-int fib_table_flush(struct net *net, struct fib_table *tb)
+int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
 {
 	struct trie *t = (struct trie *)tb->tb_data;
 	struct key_vector *pn = t->kv;
@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
 		hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
 			struct fib_info *fi = fa->fa_info;
 
-			if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
-			    tb->tb_id != fa->tb_id) {
+			if (!fi || tb->tb_id != fa->tb_id ||
+			    (!(fi->fib_flags & RTNH_F_DEAD) &&
+			     !fib_props[fa->fa_type].error)) {
+				slen = fa->fa_slen;
+				continue;
+			}
+
+			/* Do not flush error routes if network namespace is
+			 * not being dismantled
+			 */
+			if (!flush_all && fib_props[fa->fa_type].error) {
 				slen = fa->fa_slen;
 				continue;
 			}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 0c9f171fb085..437070d1ffb1 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -1020,10 +1020,11 @@ static int gue_err(struct sk_buff *skb, u32 info)
 {
 	int transport_offset = skb_transport_offset(skb);
 	struct guehdr *guehdr;
-	size_t optlen;
+	size_t len, optlen;
 	int ret;
 
-	if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+	len = sizeof(struct udphdr) + sizeof(struct guehdr);
+	if (!pskb_may_pull(skb, len))
 		return -EINVAL;
 
 	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -1058,6 +1059,10 @@ static int gue_err(struct sk_buff *skb, u32 info)
 
 	optlen = guehdr->hlen << 2;
 
+	if (!pskb_may_pull(skb, len + optlen))
+		return -EINVAL;
+
+	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
 	if (validate_gue_flags(guehdr, optlen))
 		return -EINVAL;
 
@@ -1065,7 +1070,8 @@ static int gue_err(struct sk_buff *skb, u32 info)
 	 * recursion. Besides, this kind of encapsulation can't even be
 	 * configured currently. Discard this.
 	 */
-	if (guehdr->proto_ctype == IPPROTO_UDP)
+	if (guehdr->proto_ctype == IPPROTO_UDP ||
+	    guehdr->proto_ctype == IPPROTO_UDPLITE)
 		return -EOPNOTSUPP;
 
 	skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
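
The fou.c hunks above follow the usual `pskb_may_pull()` discipline: the call may reallocate the skb head, so `guehdr` is recomputed from `udp_hdr(skb)` after the second pull instead of reusing the old pointer. A rough user-space analogy of why pointers must be re-derived after the buffer may have moved (plain `realloc()`, not the kernel API):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Analogy only: growing a buffer with realloc() may move it, so pointers
 * computed before the call are stale afterwards and must be re-derived
 * from the new base -- the same rule gue_err() applies after pskb_may_pull().
 */
struct pkt {
	unsigned char *data;
	size_t len;
};

static int ensure_len(struct pkt *p, size_t need)
{
	unsigned char *n;

	if (p->len >= need)
		return 0;
	n = realloc(p->data, need);
	if (!n)
		return -1;
	memset(n + p->len, 0, need - p->len);
	p->data = n;
	p->len = need;
	return 0;
}

int main(void)
{
	struct pkt p = { .data = calloc(16, 1), .len = 16 };
	unsigned char *hdr = p.data + 8;      /* pointer into the buffer */

	if (ensure_len(&p, 4096) == 0)
		hdr = p.data + 8;             /* re-derive after a possible move */
	printf("%u\n", (unsigned)hdr[0]);
	free(p.data);
	return 0;
}
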
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index a4bf22ee3aed..7c4a41dc04bb 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 #include <net/gre.h>
+#include <net/erspan.h>
 
 #include <net/icmp.h>
 #include <net/route.h>
@@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 		hdr_len += 4;
 	}
 	tpi->hdr_len = hdr_len;
+
+	/* ERSPAN ver 1 and 2 protocol sets GRE key field
+	 * to 0 and sets the configured key in the
+	 * inner erspan header field
+	 */
+	if (greh->protocol == htons(ETH_P_ERSPAN) ||
+	    greh->protocol == htons(ETH_P_ERSPAN2)) {
+		struct erspan_base_hdr *ershdr;
+
+		if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
+			return -EINVAL;
+
+		ershdr = (struct erspan_base_hdr *)options;
+		tpi->key = cpu_to_be32(get_session_id(ershdr));
+	}
+
 	return hdr_len;
 }
 EXPORT_SYMBOL(gre_parse_header);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 1a4e9ff02762..5731670c560b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
 		+ nla_total_size(1) /* INET_DIAG_TOS */
 		+ nla_total_size(1) /* INET_DIAG_TCLASS */
 		+ nla_total_size(4) /* INET_DIAG_MARK */
+		+ nla_total_size(4) /* INET_DIAG_CLASS_ID */
 		+ nla_total_size(sizeof(struct inet_diag_meminfo))
 		+ nla_total_size(sizeof(struct inet_diag_msg))
 		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 			goto errout;
 	}
 
-	if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
+	if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
+	    ext & (1 << (INET_DIAG_TCLASS - 1))) {
 		u32 classid = 0;
 
 #ifdef CONFIG_SOCK_CGROUP_DATA
 		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
 #endif
+		/* Fallback to socket priority if class id isn't set.
+		 * Classful qdiscs use it as direct reference to class.
+		 * For cgroup2 classid is always zero.
+		 */
+		if (!classid)
+			classid = sk->sk_priority;
 
 		if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
 			goto errout;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d757b9642d0d..be778599bfed 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 			atomic_set(&p->rid, 0);
 			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
 			p->rate_tokens = 0;
+			p->n_redirects = 0;
 			/* 60*HZ is arbitrary, but chosen enough high so that the first
 			 * calculation of tokens is at its maximum.
 			 */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d1d09f3e5f9e..6ae89f2b541b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -268,20 +268,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 	int len;
 
 	itn = net_generic(net, erspan_net_id);
-	len = gre_hdr_len + sizeof(*ershdr);
-
-	/* Check based hdr len */
-	if (unlikely(!pskb_may_pull(skb, len)))
-		return PACKET_REJECT;
 
 	iph = ip_hdr(skb);
 	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 	ver = ershdr->ver;
 
-	/* The original GRE header does not have key field,
-	 * Use ERSPAN 10-bit session ID as key.
-	 */
-	tpi->key = cpu_to_be32(get_session_id(ershdr));
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 				  tpi->flags | TUNNEL_KEY,
 				  iph->saddr, iph->daddr, tpi->key);
@@ -569,8 +560,7 @@ err_free_skb:
 	dev->stats.tx_dropped++;
 }
 
-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
-			   __be16 proto)
+static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct ip_tunnel_info *tun_info;
@@ -578,10 +568,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 	struct erspan_metadata *md;
 	struct rtable *rt = NULL;
 	bool truncate = false;
+	__be16 df, proto;
 	struct flowi4 fl;
 	int tunnel_hlen;
 	int version;
-	__be16 df;
 	int nhoff;
 	int thoff;
 
@@ -626,18 +616,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 	if (version == 1) {
 		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 				    ntohl(md->u.index), truncate, true);
+		proto = htons(ETH_P_ERSPAN);
 	} else if (version == 2) {
 		erspan_build_header_v2(skb,
 				       ntohl(tunnel_id_to_key32(key->tun_id)),
 				       md->u.md2.dir,
 				       get_hwid(&md->u.md2),
 				       truncate, true);
+		proto = htons(ETH_P_ERSPAN2);
 	} else {
 		goto err_free_rt;
 	}
 
 	gre_build_header(skb, 8, TUNNEL_SEQ,
-			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
+			 proto, 0, htonl(tunnel->o_seqno++));
 
 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
 
@@ -721,12 +713,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	bool truncate = false;
+	__be16 proto;
 
 	if (!pskb_inet_may_pull(skb))
 		goto free_skb;
 
 	if (tunnel->collect_md) {
-		erspan_fb_xmit(skb, dev, skb->protocol);
+		erspan_fb_xmit(skb, dev);
 		return NETDEV_TX_OK;
 	}
 
@@ -742,19 +735,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 	}
 
 	/* Push ERSPAN header */
-	if (tunnel->erspan_ver == 1)
+	if (tunnel->erspan_ver == 1) {
 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 				    tunnel->index,
 				    truncate, true);
-	else if (tunnel->erspan_ver == 2)
+		proto = htons(ETH_P_ERSPAN);
+	} else if (tunnel->erspan_ver == 2) {
 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 				       tunnel->dir, tunnel->hwid,
 				       truncate, true);
-	else
+		proto = htons(ETH_P_ERSPAN2);
+	} else {
 		goto free_skb;
+	}
 
 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
-	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
+	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
 	return NETDEV_TX_OK;
 
 free_skb:
@@ -1459,12 +1455,31 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct ip_tunnel_parm *p = &t->parms;
+	__be16 o_flags = p->o_flags;
+
+	if (t->erspan_ver == 1 || t->erspan_ver == 2) {
+		if (!t->collect_md)
+			o_flags |= TUNNEL_KEY;
+
+		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+			goto nla_put_failure;
+
+		if (t->erspan_ver == 1) {
+			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
+				goto nla_put_failure;
+		} else {
+			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+				goto nla_put_failure;
+			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+				goto nla_put_failure;
+		}
+	}
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
-			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+			 gre_tnl_flags_to_gre_flags(o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
@@ -1494,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 			goto nla_put_failure;
 	}
 
-	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
-		goto nla_put_failure;
-
-	if (t->erspan_ver == 1) {
-		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
-			goto nla_put_failure;
-	} else if (t->erspan_ver == 2) {
-		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
-			goto nla_put_failure;
-		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
-			goto nla_put_failure;
-	}
-
 	return 0;
 
 nla_put_failure:
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 26921f6b3b92..51d8efba6de2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -488,6 +488,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
 		goto drop;
 	}
 
+	iph = ip_hdr(skb);
 	skb->transport_header = skb->network_header + iph->ihl*4;
 
 	/* Remove any debris in the socket control block */
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fffcc130900e..82f341e84fae 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
+	__be16 _ports[2], *ports;
 	struct sockaddr_in sin;
-	__be16 *ports;
-	int end;
-
-	end = skb_transport_offset(skb) + 4;
-	if (end > 0 && !pskb_may_pull(skb, end))
-		return;
 
 	/* All current transport protocols have the port numbers in the
 	 * first four bytes of the transport header and this function is
 	 * written with this assumption in mind.
 	 */
-	ports = (__be16 *)skb_transport_header(skb);
+	ports = skb_header_pointer(skb, skb_transport_offset(skb),
+				   sizeof(_ports), &_ports);
+	if (!ports)
+		return;
 
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
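
The ip_sockglue.c hunk above switches to `skb_header_pointer()`, which either points at the requested bytes in linear data or copies them into the caller-supplied `_ports` buffer, and returns NULL when the packet is too short; unlike `pskb_may_pull()` it never modifies the skb. A small user-space sketch of the same copy-or-fail idea (an analogy, not the kernel implementation):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

/* User-space sketch of the skb_header_pointer() idea: return a pointer to
 * the requested region if the packet is long enough, otherwise NULL. The
 * kernel version points into linear data when it can and copies into the
 * caller's buffer only for non-linear skbs.
 */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
				  size_t offset, size_t len, void *copy)
{
	if (offset + len > pkt_len)
		return NULL;                 /* too short: caller must bail out */
	memcpy(copy, pkt + offset, len);
	return copy;
}

int main(void)
{
	uint8_t transport[4] = { 0x00, 0x35, 0xc0, 0x01 };  /* sport 53, dport 49153 */
	uint16_t _ports[2];
	const uint16_t *ports;

	ports = header_pointer(transport, sizeof(transport), 0,
			       sizeof(_ports), _ports);
	if (!ports)
		return 1;
	printf("sport=%u dport=%u\n", ntohs(ports[0]), ntohs(ports[1]));
	return 0;
}
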
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c4f5602308ed..054d01c16dc6 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -644,13 +644,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	dst = tnl_params->daddr;
 	if (dst == 0) {
 		/* NBMA tunnel */
+		struct ip_tunnel_info *tun_info;
 
 		if (!skb_dst(skb)) {
 			dev->stats.tx_fifo_errors++;
 			goto tx_error;
 		}
 
-		if (skb->protocol == htons(ETH_P_IP)) {
+		tun_info = skb_tunnel_info(skb);
+		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
+		    ip_tunnel_info_af(tun_info) == AF_INET &&
+		    tun_info->key.u.ipv4.dst)
+			dst = tun_info->key.u.ipv4.dst;
+		else if (skb->protocol == htons(ETH_P_IP)) {
 			rt = skb_rtable(skb);
 			dst = rt_nexthop(rt, inner_iph->daddr);
 		}
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index d7b43e700023..68a21bf75dd0 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -74,6 +74,33 @@ drop:
 	return 0;
 }
 
+static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
+			  int encap_type)
+{
+	struct ip_tunnel *tunnel;
+	const struct iphdr *iph = ip_hdr(skb);
+	struct net *net = dev_net(skb->dev);
+	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+
+	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+				  iph->saddr, iph->daddr, 0);
+	if (tunnel) {
+		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+			goto drop;
+
+		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+
+		skb->dev = tunnel->dev;
+
+		return xfrm_input(skb, nexthdr, spi, encap_type);
+	}
+
+	return -EINVAL;
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
 static int vti_rcv(struct sk_buff *skb)
 {
 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
 	return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
 }
 
+static int vti_rcv_ipip(struct sk_buff *skb)
+{
+	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+	return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
+}
+
 static int vti_rcv_cb(struct sk_buff *skb, int err)
 {
 	unsigned short family;
@@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
 	.priority	=	100,
 };
 
+static struct xfrm_tunnel ipip_handler __read_mostly = {
+	.handler	=	vti_rcv_ipip,
+	.err_handler	=	vti4_err,
+	.priority	=	0,
+};
+
 static int __net_init vti_init_net(struct net *net)
 {
 	int err;
@@ -603,6 +644,13 @@ static int __init vti_init(void)
 	if (err < 0)
 		goto xfrm_proto_comp_failed;
 
+	msg = "ipip tunnel";
+	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
+	if (err < 0) {
+		pr_info("%s: cant't register tunnel\n",__func__);
+		goto xfrm_tunnel_failed;
+	}
+
 	msg = "netlink interface";
 	err = rtnl_link_register(&vti_link_ops);
 	if (err < 0)
@@ -612,6 +660,8 @@ static int __init vti_init(void)
 
 rtnl_link_failed:
 	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+xfrm_tunnel_failed:
+	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
 xfrm_proto_comp_failed:
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 xfrm_proto_ah_failed:
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index b61977db9b7f..2a909e5f9ba0 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -846,9 +846,9 @@ static int clusterip_net_init(struct net *net)
 
 static void clusterip_net_exit(struct net *net)
 {
+#ifdef CONFIG_PROC_FS
 	struct clusterip_net *cn = clusterip_pernet(net);
 
-#ifdef CONFIG_PROC_FS
 	mutex_lock(&cn->mutex);
 	proc_remove(cn->procdir);
 	cn->procdir = NULL;
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 2687db015b6f..fa2ba7c500e4 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -215,6 +215,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
 
 	/* Change outer to look like the reply to an incoming packet */
 	nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+	target.dst.protonum = IPPROTO_ICMP;
 	if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
 		return 0;
 
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
index a0aa13bcabda..0a8a60c1bf9a 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
 int snmp_version(void *context, size_t hdrlen, unsigned char tag,
 		 const void *data, size_t datalen)
 {
+	if (datalen != 1)
+		return -EINVAL;
 	if (*(unsigned char *)data > 1)
 		return -ENOTSUPP;
 	return 1;
@@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
 		const void *data, size_t datalen)
 {
 	struct snmp_ctx *ctx = (struct snmp_ctx *)context;
-	__be32 *pdata = (__be32 *)data;
+	__be32 *pdata;
 
+	if (datalen != 4)
+		return -EINVAL;
+	pdata = (__be32 *)data;
 	if (*pdata == ctx->from) {
 		pr_debug("%s: %pI4 to %pI4\n", __func__,
 			 (void *)&ctx->from, (void *)&ctx->to);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ce92f73cf104..5163b64f8fb3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	/* No redirected packets during ip_rt_redirect_silence;
 	 * reset the algorithm.
 	 */
-	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
 		peer->rate_tokens = 0;
+		peer->n_redirects = 0;
+	}
 
 	/* Too many ignored redirects; do not send anything
 	 * set dst.rate_last to the last seen redirected packet.
 	 */
-	if (peer->rate_tokens >= ip_rt_redirect_number) {
+	if (peer->n_redirects >= ip_rt_redirect_number) {
 		peer->rate_last = jiffies;
 		goto out_put_peer;
 	}
@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 		peer->rate_last = jiffies;
 		++peer->rate_tokens;
+		++peer->n_redirects;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
 		    peer->rate_tokens == ip_rt_redirect_number)
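
The route.c hunks above split the bookkeeping: `peer->rate_tokens` keeps driving the exponential backoff between transmitted redirects, while the new `peer->n_redirects` counts redirects sent since the last silence reset and is what decides when to go quiet. A rough stand-alone model of that split (illustrative constants and simplified logic, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Two separate counters, as in the patch: one for backoff arithmetic,
 * one for the "peer keeps ignoring us" cutoff.
 */
struct peer {
	unsigned int rate_tokens;   /* drives backoff delay between sends */
	unsigned int n_redirects;   /* total sent since the last silence reset */
};

#define REDIRECT_NUMBER 9           /* stand-in for ip_rt_redirect_number */

static bool should_send_redirect(struct peer *p)
{
	if (p->n_redirects >= REDIRECT_NUMBER)
		return false;        /* too many ignored redirects: stop */
	++p->rate_tokens;
	++p->n_redirects;
	return true;
}

int main(void)
{
	struct peer p = { 0, 0 };
	int sent = 0;

	for (int i = 0; i < 20; i++)
		if (should_send_redirect(&p))
			sent++;
	printf("sent %d redirects\n", sent);   /* prints 9 */
	return 0;
}
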
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 27e2f6837062..cf3c5095c10e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 	flags = msg->msg_flags;
 
 	if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
-		if (sk->sk_state != TCP_ESTABLISHED) {
+		if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
 			err = -EINVAL;
 			goto out_err;
 		}
@@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk)
 	sk_mem_reclaim(sk);
 	tcp_clear_all_retrans_hints(tcp_sk(sk));
 	tcp_sk(sk)->packets_out = 0;
+	inet_csk(sk)->icsk_backoff = 0;
 }
 
 int tcp_disconnect(struct sock *sk, int flags)
@@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->write_seq += tp->max_window + 2;
 	if (tp->write_seq == 0)
 		tp->write_seq = 1;
-	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
 	icsk->icsk_probes_out = 0;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index efc6fef692ff..ec3cea9d6828 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		if (sock_owned_by_user(sk))
 			break;
 
+		skb = tcp_rtx_queue_head(sk);
+		if (WARN_ON_ONCE(!skb))
+			break;
+
 		icsk->icsk_backoff--;
 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
 					       TCP_TIMEOUT_INIT;
 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
-		skb = tcp_rtx_queue_head(sk);
 
 		tcp_mstamp_refresh(tp);
 		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 730bc44dbad9..ccc78f3a4b60 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2347 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ 2347 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2348 skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; 2348 skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2349 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); 2349 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2350 tcp_init_tso_segs(skb, mss_now);
2350 goto repair; /* Skip network transmission */ 2351 goto repair; /* Skip network transmission */
2351 } 2352 }
2352 2353
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index f87dbc78b6bc..71a29e9c0620 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk)
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits) {
 			dst_negative_advice(sk);
-		} else if (!tp->syn_data && !tp->syn_fastopen) {
+		} else {
 			sk_rethink_txhash(sk);
 		}
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3fb0ed5e4789..372fdc5381a9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
 
 	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 		int (*handler)(struct sk_buff *skb, u32 info);
+		const struct ip_tunnel_encap_ops *encap;
 
-		if (!iptun_encaps[i])
+		encap = rcu_dereference(iptun_encaps[i]);
+		if (!encap)
 			continue;
-		handler = rcu_dereference(iptun_encaps[i]->err_handler);
+		handler = encap->err_handler;
 		if (handler && !handler(skb, info))
 			return 0;
 	}
@@ -847,15 +849,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
 		const int hlen = skb_network_header_len(skb) +
 				 sizeof(struct udphdr);
 
-		if (hlen + cork->gso_size > cork->fragsize)
+		if (hlen + cork->gso_size > cork->fragsize) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+		}
+		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (sk->sk_no_check_tx)
+		}
+		if (sk->sk_no_check_tx) {
+			kfree_skb(skb);
 			return -EINVAL;
+		}
 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
-		    dst_xfrm(skb_dst(skb)))
+		    dst_xfrm(skb_dst(skb))) {
+			kfree_skb(skb);
 			return -EIO;
+		}
 
 		skb_shinfo(skb)->gso_size = cork->gso_size;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
@@ -1918,7 +1928,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash)
 }
 EXPORT_SYMBOL(udp_lib_rehash);
 
-static void udp_v4_rehash(struct sock *sk)
+void udp_v4_rehash(struct sock *sk)
 {
 	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
 					  inet_sk(sk)->inet_rcv_saddr,
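
The `__udp4_lib_err_encap_no_sk()` hunk above loads the RCU-protected `iptun_encaps[i]` slot once into a local `encap` and only uses that local afterwards, so the NULL check and the `err_handler` access cannot observe two different values of the slot. A sketch of the same load-once pattern using C11 atomics in place of RCU (illustrative only, not the kernel API):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* The shared slot is read a single time into a local pointer; the NULL
 * check and the field access then operate on that snapshot, so a writer
 * swapping the entry out in between cannot cause a NULL dereference.
 */
struct encap_ops {
	int (*err_handler)(int info);
};

static _Atomic(struct encap_ops *) encap_slot;

static int dispatch_err(int info)
{
	struct encap_ops *ops = atomic_load_explicit(&encap_slot,
						     memory_order_acquire);
	if (!ops || !ops->err_handler)
		return -1;
	return ops->err_handler(info);
}

static int dummy_handler(int info)
{
	return info * 2;
}

int main(void)
{
	static struct encap_ops ops = { .err_handler = dummy_handler };

	atomic_store_explicit(&encap_slot, &ops, memory_order_release);
	printf("%d\n", dispatch_err(21));   /* prints 42 */
	return 0;
}
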
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 322672655419..6b2fa77eeb1c 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -10,6 +10,7 @@ int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
 int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
 
 int udp_v4_get_port(struct sock *sk, unsigned short snum);
+void udp_v4_rehash(struct sock *sk);
 
 int udp_setsockopt(struct sock *sk, int level, int optname,
 		   char __user *optval, unsigned int optlen);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 39c7f17d916f..3c94b8f0ff27 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -53,6 +53,7 @@ struct proto udplite_prot = {
 	.sendpage	   = udp_sendpage,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
+	.rehash		   = udp_v4_rehash,
 	.get_port	   = udp_v4_get_port,
 	.memory_allocated  = &udp_memory_allocated,
 	.sysctl_mem	   = sysctl_udp_mem,