Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/inet_hashtables.c |  2
-rw-r--r--  net/ipv4/ip_output.c       | 13
-rw-r--r--  net/ipv4/ip_vti.c          | 14
-rw-r--r--  net/ipv4/route.c           |  2
-rw-r--r--  net/ipv4/tcp_input.c       |  9
-rw-r--r--  net/ipv4/tcp_output.c      | 14
-rw-r--r--  net/ipv4/xfrm4_policy.c    |  1
7 files changed, 39 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 7bd8983dbfcf..96da9c77deca 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -287,7 +287,7 @@ begintw:
 			if (unlikely(!INET_TW_MATCH(sk, net, acookie,
 						    saddr, daddr, ports,
 						    dif))) {
-				sock_put(sk);
+				inet_twsk_put(inet_twsk(sk));
 				goto begintw;
 			}
 			goto out;
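
For reference, the replacement call releases the TIME_WAIT socket through its own refcount helper instead of sock_put(); inet_twsk_put() in kernels of this era reads roughly as follows (paraphrased, details may vary by version):

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	/* Drop tw_refcnt; free through the timewait path, not sk_free(). */
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}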
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a04d872c54f9..3982eabf61e1 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
 		/* initialize protocol header pointer */
 		skb->transport_header = skb->network_header + fragheaderlen;
 
-		skb->ip_summed = CHECKSUM_PARTIAL;
 		skb->csum = 0;
 
-		/* specify the length of each IP datagram fragment */
-		skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
 		__skb_queue_tail(queue, skb);
+	} else if (skb_is_gso(skb)) {
+		goto append;
 	}
 
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	/* specify the length of each IP datagram fragment */
+	skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
 	return skb_append_datato_frags(sk, skb, getfrag, from,
 				       (length - transhdrlen));
 }
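
For reference, the new skb_is_gso() check simply tests whether an earlier pass already set gso_size on the skb; the helper in include/linux/skbuff.h is roughly:

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	/* Non-zero gso_size marks the skb as already GSO/UFO-initialized. */
	return skb_shinfo(skb)->gso_size;
}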
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e805e7b3030e..6e87f853d033 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -125,8 +125,17 @@ static int vti_rcv(struct sk_buff *skb)
 				  iph->saddr, iph->daddr, 0);
 	if (tunnel != NULL) {
 		struct pcpu_tstats *tstats;
+		u32 oldmark = skb->mark;
+		int ret;
 
-		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+
+		/* temporarily mark the skb with the tunnel o_key, to
+		 * only match policies with this mark.
+		 */
+		skb->mark = be32_to_cpu(tunnel->parms.o_key);
+		ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
+		skb->mark = oldmark;
+		if (!ret)
 			return -1;
 
 		tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -135,7 +144,6 @@ static int vti_rcv(struct sk_buff *skb)
 		tstats->rx_bytes += skb->len;
 		u64_stats_update_end(&tstats->syncp);
 
-		skb->mark = 0;
 		secpath_reset(skb);
 		skb->dev = tunnel->dev;
 		return 1;
@@ -167,7 +175,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	memset(&fl4, 0, sizeof(fl4));
 	flowi4_init_output(&fl4, tunnel->parms.link,
-			   be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
+			   be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
 			   RT_SCOPE_UNIVERSE,
 			   IPPROTO_IPIP, 0,
 			   dst, tiph->saddr, 0, 0);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 727f4365bcdf..6011615e810d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 							      RT_SCOPE_LINK);
 			goto make_route;
 		}
-		if (fl4->saddr) {
+		if (!fl4->saddr) {
 			if (ipv4_is_multicast(fl4->daddr))
 				fl4->saddr = inet_select_addr(dev_out, 0,
 							      fl4->flowi4_scope);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 25a89eaa669d..a16b01b537ba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1284,7 +1284,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
 	}
 
-	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
+	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+		TCP_SKB_CB(prev)->end_seq++;
+
 	if (skb == tcp_highest_sack(sk))
 		tcp_advance_highest_sack(sk, skb);
 
@@ -3288,7 +3291,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 		tcp_init_cwnd_reduction(sk, true);
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 		tcp_end_cwnd_reduction(sk);
-		tcp_set_ca_state(sk, TCP_CA_Open);
+		tcp_try_keep_open(sk);
 		NET_INC_STATS_BH(sock_net(sk),
 				 LINUX_MIB_TCPLOSSPROBERECOVERY);
 	}
@@ -5709,6 +5712,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		} else
 			tcp_init_metrics(sk);
 
+		tcp_update_pacing_rate(sk);
+
 		/* Prevent spurious tcp_cwnd_restart() on first data packet */
 		tp->lsndtime = tcp_time_stamp;
 
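
For reference, tcp_try_keep_open() differs from forcing TCP_CA_Open in that it falls back to TCP_CA_Disorder while sacked or retransmitted data is still outstanding; paraphrased from tcp_input.c of this era (details may vary by version):

static void tcp_try_keep_open(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int state = TCP_CA_Open;

	/* Stay in Disorder while SACKed/lost or retransmitted data is out. */
	if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
		state = TCP_CA_Disorder;

	if (inet_csk(sk)->icsk_ca_state != state) {
		tcp_set_ca_state(sk, state);
		tp->high_seq = tp->snd_nxt;
	}
}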
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e6bb8256e59f..d46f2143305c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 	unsigned int size = 0;
 	unsigned int eff_sacks;
 
+	opts->options = 0;
+
 #ifdef CONFIG_TCP_MD5SIG
 	*md5 = tp->af_specific->md5_lookup(sk, sk);
 	if (unlikely(*md5)) {
@@ -984,8 +986,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
-	if (skb->len <= mss_now || !sk_can_gso(sk) ||
-	    skb->ip_summed == CHECKSUM_NONE) {
+	/* Make sure we own this skb before messing gso_size/gso_segs */
+	WARN_ON_ONCE(skb_cloned(skb));
+
+	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
@@ -1065,9 +1069,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	if (nsize < 0)
 		nsize = 0;
 
-	if (skb_cloned(skb) &&
-	    skb_is_nonlinear(skb) &&
-	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+	if (skb_unclone(skb, GFP_ATOMIC))
 		return -ENOMEM;
 
 	/* Get a new skb... force flag on. */
@@ -2342,6 +2344,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		int oldpcount = tcp_skb_pcount(skb);
 
 		if (unlikely(oldpcount > 1)) {
+			if (skb_unclone(skb, GFP_ATOMIC))
+				return -ENOMEM;
 			tcp_init_tso_segs(sk, skb, cur_mss);
 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
 		}
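
For reference, skb_unclone() wraps the open-coded clone test that tcp_fragment() used to carry; the helper in include/linux/skbuff.h of this era reads roughly (paraphrased):

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	/* Take a private copy of the header so gso fields can be rewritten. */
	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}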
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 9a459be24af7..ccde54248c8c 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -107,6 +107,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 
 	memset(fl4, 0, sizeof(struct flowi4));
 	fl4->flowi4_mark = skb->mark;
+	fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
 
 	if (!ip_is_fragment(iph)) {
 		switch (iph->protocol) {