Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c                   |  2
-rw-r--r--  net/ipv4/fou.c                       |  2
-rw-r--r--  net/ipv4/geneve.c                    |  3
-rw-r--r--  net/ipv4/gre_offload.c               |  4
-rw-r--r--  net/ipv4/inet_fragment.c             |  4
-rw-r--r--  net/ipv4/ip_output.c                 |  2
-rw-r--r--  net/ipv4/ip_sockglue.c               |  2
-rw-r--r--  net/ipv4/netfilter/nf_reject_ipv4.c  | 91
-rw-r--r--  net/ipv4/netfilter/nft_masq_ipv4.c   |  1
-rw-r--r--  net/ipv4/route.c                     |  1
-rw-r--r--  net/ipv4/tcp.c                       | 59
-rw-r--r--  net/ipv4/tcp_input.c                 | 60
-rw-r--r--  net/ipv4/tcp_ipv4.c                  |  4
-rw-r--r--  net/ipv4/tcp_output.c                |  2
-rw-r--r--  net/ipv4/udp_offload.c               |  2
15 files changed, 134 insertions(+), 105 deletions(-)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 92db7a69f2b9..8b7fe5b03906 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1246,7 +1246,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 
 	encap = SKB_GSO_CB(skb)->encap_level > 0;
 	if (encap)
-		features = skb->dev->hw_enc_features & netif_skb_features(skb);
+		features &= skb->dev->hw_enc_features;
 	SKB_GSO_CB(skb)->encap_level += ihl;
 
 	skb_reset_transport_header(skb);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 32e78924e246..606c520ffd5a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff)
 	int err = -ENOSYS;
 	const struct net_offload **offloads;
 
+	udp_tunnel_gro_complete(skb, nhoff);
+
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[proto]);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 065cd94c640c..dedb21e99914 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
 	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
 
+	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+
 	return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
 				   tos, ttl, df, src_port, dst_port, xnet);
 }
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
 static void __exit geneve_cleanup_module(void)
 {
 	destroy_workqueue(geneve_wq);
+	unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
 
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index ccda09628de7..bb5947b0ce2d 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -47,7 +47,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 
 	greh = (struct gre_base_hdr *)skb_transport_header(skb);
 
-	ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
+	ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
 	if (unlikely(ghl < sizeof(*greh)))
 		goto out;
 
@@ -68,7 +68,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	skb->mac_len = skb_inner_network_offset(skb);
 
 	/* segment inner packet. */
-	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+	enc_features = skb->dev->hw_enc_features & features;
 	segs = skb_mac_gso_segment(skb, enc_features);
 	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 9eb89f3f0ee4..19419b60cb37 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -146,7 +146,6 @@ evict_again:
 			atomic_inc(&fq->refcnt);
 			spin_unlock(&hb->chain_lock);
 			del_timer_sync(&fq->timer);
-			WARN_ON(atomic_read(&fq->refcnt) != 1);
 			inet_frag_put(fq, f);
 			goto evict_again;
 		}
@@ -285,7 +284,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 	struct inet_frag_bucket *hb;
 
 	hb = get_frag_bucket_locked(fq, f);
-	hlist_del(&fq->list);
+	if (!(fq->flags & INET_FRAG_EVICTED))
+		hlist_del(&fq->list);
 	spin_unlock(&hb->chain_lock);
 }
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 88e5ef2c7f51..bc6471d4abcd 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
 	 */
 	features = netif_skb_features(skb);
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-	if (IS_ERR(segs)) {
+	if (IS_ERR_OR_NULL(segs)) {
 		kfree_skb(skb);
 		return -ENOMEM;
 	}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c373a9ad4555..9daf2177dc00 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
 	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 		if (!CMSG_OK(msg, cmsg))
 			return -EINVAL;
-#if defined(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
 		if (allow_ipv6 &&
 		    cmsg->cmsg_level == SOL_IPV6 &&
 		    cmsg->cmsg_type == IPV6_PKTINFO) {
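Side note on the ip_sockglue.c hunk above: IS_ENABLED() (from include/linux/kconfig.h) is true when an option is built in or modular, whereas defined() only covers the =y case. A minimal sketch using a hypothetical CONFIG_EXAMPLE symbol, not taken from this patch:

#include <linux/kconfig.h>

/* With CONFIG_EXAMPLE=m the preprocessor defines CONFIG_EXAMPLE_MODULE,
 * not CONFIG_EXAMPLE, so the first test silently compiles the branch out.
 */
#if defined(CONFIG_EXAMPLE)	/* only CONFIG_EXAMPLE=y */
#endif

#if IS_ENABLED(CONFIG_EXAMPLE)	/* CONFIG_EXAMPLE=y or =m */
#endif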
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index b023b4eb1a96..1baaa83dfe5c 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -6,48 +6,45 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/module.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/route.h>
 #include <net/dst.h>
 #include <linux/netfilter_ipv4.h>
+#include <net/netfilter/ipv4/nf_reject.h>
 
-/* Send RST reply */
-void nf_send_reset(struct sk_buff *oldskb, int hook)
+const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+					     struct tcphdr *_oth, int hook)
 {
-	struct sk_buff *nskb;
-	const struct iphdr *oiph;
-	struct iphdr *niph;
 	const struct tcphdr *oth;
-	struct tcphdr _otcph, *tcph;
 
 	/* IP header checks: fragment. */
 	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
-		return;
+		return NULL;
 
 	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
-				 sizeof(_otcph), &_otcph);
+				 sizeof(struct tcphdr), _oth);
 	if (oth == NULL)
-		return;
+		return NULL;
 
 	/* No RST for RST. */
 	if (oth->rst)
-		return;
-
-	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
-		return;
+		return NULL;
 
 	/* Check checksum */
 	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
-		return;
-	oiph = ip_hdr(oldskb);
+		return NULL;
 
-	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
-			 LL_MAX_HEADER, GFP_ATOMIC);
-	if (!nskb)
-		return;
+	return oth;
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
 
-	skb_reserve(nskb, LL_MAX_HEADER);
+struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+				  const struct sk_buff *oldskb,
+				  __be16 protocol, int ttl)
+{
+	struct iphdr *niph, *oiph = ip_hdr(oldskb);
 
 	skb_reset_network_header(nskb);
 	niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
@@ -56,10 +53,23 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
 	niph->tos = 0;
 	niph->id = 0;
 	niph->frag_off = htons(IP_DF);
-	niph->protocol = IPPROTO_TCP;
+	niph->protocol = protocol;
 	niph->check = 0;
 	niph->saddr = oiph->daddr;
 	niph->daddr = oiph->saddr;
+	niph->ttl = ttl;
+
+	nskb->protocol = htons(ETH_P_IP);
+
+	return niph;
+}
+EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);
+
+void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+			     const struct tcphdr *oth)
+{
+	struct iphdr *niph = ip_hdr(nskb);
+	struct tcphdr *tcph;
 
 	skb_reset_transport_header(nskb);
 	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
@@ -68,9 +78,9 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
 	tcph->dest = oth->source;
 	tcph->doff = sizeof(struct tcphdr) / 4;
 
-	if (oth->ack)
+	if (oth->ack) {
 		tcph->seq = oth->ack_seq;
-	else {
+	} else {
 		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
 				      oldskb->len - ip_hdrlen(oldskb) -
 				      (oth->doff << 2));
@@ -83,16 +93,43 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
 	nskb->ip_summed = CHECKSUM_PARTIAL;
 	nskb->csum_start = (unsigned char *)tcph - nskb->head;
 	nskb->csum_offset = offsetof(struct tcphdr, check);
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
+
+/* Send RST reply */
+void nf_send_reset(struct sk_buff *oldskb, int hook)
+{
+	struct sk_buff *nskb;
+	const struct iphdr *oiph;
+	struct iphdr *niph;
+	const struct tcphdr *oth;
+	struct tcphdr _oth;
+
+	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+	if (!oth)
+		return;
+
+	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+		return;
+
+	oiph = ip_hdr(oldskb);
+
+	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+			 LL_MAX_HEADER, GFP_ATOMIC);
+	if (!nskb)
+		return;
 
 	/* ip_route_me_harder expects skb->dst to be set */
 	skb_dst_set_noref(nskb, skb_dst(oldskb));
 
-	nskb->protocol = htons(ETH_P_IP);
+	skb_reserve(nskb, LL_MAX_HEADER);
+	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+				   ip4_dst_hoplimit(skb_dst(nskb)));
+	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+
 	if (ip_route_me_harder(nskb, RTN_UNSPEC))
 		goto free_nskb;
 
-	niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
-
 	/* "Never happens" */
 	if (nskb->len > dst_mtu(skb_dst(nskb)))
 		goto free_nskb;
@@ -125,3 +162,5 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
 	kfree_skb(nskb);
 }
 EXPORT_SYMBOL_GPL(nf_send_reset);
+
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index 1c636d6b5b50..c1023c445920 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -39,6 +39,7 @@ static const struct nft_expr_ops nft_masq_ipv4_ops = {
 	.eval = nft_masq_ipv4_eval,
 	.init = nft_masq_init,
 	.dump = nft_masq_dump,
+	.validate = nft_masq_validate,
 };
 
 static struct nft_expr_type nft_masq_ipv4_type __read_mostly = {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2d4ae469b471..6a2155b02602 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1798,6 +1798,7 @@ local_input:
 no_route:
 	RT_CACHE_STAT_INC(in_no_route);
 	res.type = RTN_UNREACHABLE;
+	res.fi = NULL;
 	goto local_input;
 
 	/*
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1bec4e76d88c..39ec0c379545 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2868,61 +2868,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
+static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
 static DEFINE_MUTEX(tcp_md5sig_mutex);
-
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
-
-		if (p->md5_desc.tfm)
-			crypto_free_hash(p->md5_desc.tfm);
-	}
-	free_percpu(pool);
-}
+static bool tcp_md5sig_pool_populated = false;
 
 static void __tcp_alloc_md5sig_pool(void)
 {
 	int cpu;
-	struct tcp_md5sig_pool __percpu *pool;
-
-	pool = alloc_percpu(struct tcp_md5sig_pool);
-	if (!pool)
-		return;
 
 	for_each_possible_cpu(cpu) {
-		struct crypto_hash *hash;
-
-		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-		if (IS_ERR_OR_NULL(hash))
-			goto out_free;
+		if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
+			struct crypto_hash *hash;
 
-		per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
+			hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+			if (IS_ERR_OR_NULL(hash))
+				return;
+			per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
+		}
 	}
-	/* before setting tcp_md5sig_pool, we must commit all writes
-	 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
+	/* before setting tcp_md5sig_pool_populated, we must commit all writes
+	 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
 	 */
 	smp_wmb();
-	tcp_md5sig_pool = pool;
-	return;
-out_free:
-	__tcp_free_md5sig_pool(pool);
+	tcp_md5sig_pool_populated = true;
 }
 
 bool tcp_alloc_md5sig_pool(void)
 {
-	if (unlikely(!tcp_md5sig_pool)) {
+	if (unlikely(!tcp_md5sig_pool_populated)) {
 		mutex_lock(&tcp_md5sig_mutex);
 
-		if (!tcp_md5sig_pool)
+		if (!tcp_md5sig_pool_populated)
 			__tcp_alloc_md5sig_pool();
 
 		mutex_unlock(&tcp_md5sig_mutex);
 	}
-	return tcp_md5sig_pool != NULL;
+	return tcp_md5sig_pool_populated;
 }
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
@@ -2936,13 +2917,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
  */
 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
-	struct tcp_md5sig_pool __percpu *p;
-
 	local_bh_disable();
-	p = ACCESS_ONCE(tcp_md5sig_pool);
-	if (p)
-		return raw_cpu_ptr(p);
 
+	if (tcp_md5sig_pool_populated) {
+		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
+		smp_rmb();
+		return this_cpu_ptr(&tcp_md5sig_pool);
+	}
 	local_bh_enable();
 	return NULL;
 }
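Side note on the tcp.c hunks above: the patch replaces a dynamically allocated pool (alloc_percpu() plus a free/unwind path) with build-time per-CPU storage guarded by a "populated" flag. A minimal sketch of that general pattern, using made-up names (example_pool, example_pool_populated), not the kernel's symbols:

#include <linux/percpu.h>
#include <asm/barrier.h>

struct example_pool {
	void *state;
};

/* storage is reserved at build time, so there is nothing to allocate,
 * nothing to free, and no error path to unwind
 */
static DEFINE_PER_CPU(struct example_pool, example_pool);
static bool example_pool_populated;

static struct example_pool *example_get(void)
{
	if (!example_pool_populated)
		return NULL;
	/* paired with the smp_wmb() issued after the pool is filled in */
	smp_rmb();
	return this_cpu_ptr(&example_pool);
}

As in the patch, a real caller would also pin the current CPU (the kernel code uses local_bh_disable()) before dereferencing the per-CPU pointer.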
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a12b455928e5..88fa2d160685 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 
 /* Undo procedures. */
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumptions doesn't consider
+ * what will happen if errors occur when sending retransmission for the
+ * second time. ...It could the that such segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * Main reason for all this complexity is the fact that connection dying
+ * time now depends on the validity of the retrans_stamp, in particular,
+ * that successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static bool tcp_any_retrans_done(const struct sock *sk)
+{
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+
+	if (tp->retrans_out)
+		return true;
+
+	skb = tcp_write_queue_head(sk);
+	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+		return true;
+
+	return false;
+}
+
 #if FASTRETRANS_DEBUG > 1
 static void DBGUNDO(struct sock *sk, const char *msg)
 {
@@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		 * is ACKed. For Reno it is MUST to prevent false
 		 * fast retransmits (RFC2582). SACK TCP is safe. */
 		tcp_moderate_cwnd(tp);
+		if (!tcp_any_retrans_done(sk))
+			tp->retrans_stamp = 0;
 		return true;
 	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk)
 	return false;
 }
 
-/* We can clear retrans_stamp when there are no retransmissions in the
- * window. It would seem that it is trivially available for us in
- * tp->retrans_out, however, that kind of assumptions doesn't consider
- * what will happen if errors occur when sending retransmission for the
- * second time. ...It could the that such segment has only
- * TCPCB_EVER_RETRANS set at the present time. It seems that checking
- * the head skb is enough except for some reneging corner cases that
- * are not worth the effort.
- *
- * Main reason for all this complexity is the fact that connection dying
- * time now depends on the validity of the retrans_stamp, in particular,
- * that successive retransmissions of a segment must not advance
- * retrans_stamp under any conditions.
- */
-static bool tcp_any_retrans_done(const struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb;
-
-	if (tp->retrans_out)
-		return true;
-
-	skb = tcp_write_queue_head(sk);
-	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
-		return true;
-
-	return false;
-}
-
 /* Undo during loss recovery after partial ACK or using F-RTO. */
 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 94d1a7757ff7..9c7d7621466b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -206,8 +206,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	inet->inet_dport = usin->sin_port;
 	inet->inet_daddr = daddr;
 
-	inet_set_txhash(sk);
-
 	inet_csk(sk)->icsk_ext_hdr_len = 0;
 	if (inet_opt)
 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -224,6 +222,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (err)
 		goto failure;
 
+	inet_set_txhash(sk);
+
 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 			       inet->inet_sport, inet->inet_dport, sk);
 	if (IS_ERR(rt)) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3af21296d967..a3d453b94747 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2126,7 +2126,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 static bool skb_still_in_host_queue(const struct sock *sk,
 				    const struct sk_buff *skb)
 {
-	if (unlikely(skb_fclone_busy(skb))) {
+	if (unlikely(skb_fclone_busy(sk, skb))) {
 		NET_INC_STATS_BH(sock_net(sk),
 				 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 		return true;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 507310ef4b56..6480cea7aa53 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -58,7 +58,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	skb->encap_hdr_csum = 1;
 
 	/* segment inner packet. */
-	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+	enc_features = skb->dev->hw_enc_features & features;
 	segs = gso_inner_segment(skb, enc_features);
 	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,