about · summary · refs · log · tree · commit · diff · stats
path: root/net/ipv4
diff options: context · space · mode
authorDavid S. Miller <davem@davemloft.net>2013-10-23 16:28:39 -0400
committerDavid S. Miller <davem@davemloft.net>2013-10-23 16:49:34 -0400
commitc3fa32b9764dc45dcf8a2231b1c110abc4a63e0b (patch)
tree6cf2896a77b65bec64284681e1c3851eb3263e09 /net/ipv4
parent34d92d5315b64a3e5292b7e9511c1bb617227fb6 (diff)
parent320437af954cbe66478f1f5e8b34cb5a8d072191 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/usb/qmi_wwan.c
	include/net/dst.h

Trivial merge conflicts, both were overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/ip_output.c13
-rw-r--r--net/ipv4/ip_vti.c14
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_output.c12
-rw-r--r--net/ipv4/xfrm4_policy.c1
5 files changed, 31 insertions, 13 deletions
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7d8357bb2ba6..8fbac7de1e1b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
772 /* initialize protocol header pointer */ 772 /* initialize protocol header pointer */
773 skb->transport_header = skb->network_header + fragheaderlen; 773 skb->transport_header = skb->network_header + fragheaderlen;
774 774
775 skb->ip_summed = CHECKSUM_PARTIAL;
776 skb->csum = 0; 775 skb->csum = 0;
777 776
778 /* specify the length of each IP datagram fragment */ 777
779 skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
780 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
781 __skb_queue_tail(queue, skb); 778 __skb_queue_tail(queue, skb);
779 } else if (skb_is_gso(skb)) {
780 goto append;
782 } 781 }
783 782
783 skb->ip_summed = CHECKSUM_PARTIAL;
784 /* specify the length of each IP datagram fragment */
785 skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
786 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
787
788append:
784 return skb_append_datato_frags(sk, skb, getfrag, from, 789 return skb_append_datato_frags(sk, skb, getfrag, from,
785 (length - transhdrlen)); 790 (length - transhdrlen));
786} 791}
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 91f69bc883fe..5d9c845d288a 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -61,8 +61,17 @@ static int vti_rcv(struct sk_buff *skb)
61 iph->saddr, iph->daddr, 0); 61 iph->saddr, iph->daddr, 0);
62 if (tunnel != NULL) { 62 if (tunnel != NULL) {
63 struct pcpu_tstats *tstats; 63 struct pcpu_tstats *tstats;
64 u32 oldmark = skb->mark;
65 int ret;
64 66
65 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 67
68 /* temporarily mark the skb with the tunnel o_key, to
69 * only match policies with this mark.
70 */
71 skb->mark = be32_to_cpu(tunnel->parms.o_key);
72 ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
73 skb->mark = oldmark;
74 if (!ret)
66 return -1; 75 return -1;
67 76
68 tstats = this_cpu_ptr(tunnel->dev->tstats); 77 tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -71,7 +80,6 @@ static int vti_rcv(struct sk_buff *skb)
71 tstats->rx_bytes += skb->len; 80 tstats->rx_bytes += skb->len;
72 u64_stats_update_end(&tstats->syncp); 81 u64_stats_update_end(&tstats->syncp);
73 82
74 skb->mark = 0;
75 secpath_reset(skb); 83 secpath_reset(skb);
76 skb->dev = tunnel->dev; 84 skb->dev = tunnel->dev;
77 return 1; 85 return 1;
@@ -103,7 +111,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
103 111
104 memset(&fl4, 0, sizeof(fl4)); 112 memset(&fl4, 0, sizeof(fl4));
105 flowi4_init_output(&fl4, tunnel->parms.link, 113 flowi4_init_output(&fl4, tunnel->parms.link,
106 be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos), 114 be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
107 RT_SCOPE_UNIVERSE, 115 RT_SCOPE_UNIVERSE,
108 IPPROTO_IPIP, 0, 116 IPPROTO_IPIP, 0,
109 dst, tiph->saddr, 0, 0); 117 dst, tiph->saddr, 0, 0);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb651a069a6c..b935397c703c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3338,7 +3338,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
3338 tcp_init_cwnd_reduction(sk, true); 3338 tcp_init_cwnd_reduction(sk, true);
3339 tcp_set_ca_state(sk, TCP_CA_CWR); 3339 tcp_set_ca_state(sk, TCP_CA_CWR);
3340 tcp_end_cwnd_reduction(sk); 3340 tcp_end_cwnd_reduction(sk);
3341 tcp_set_ca_state(sk, TCP_CA_Open); 3341 tcp_try_keep_open(sk);
3342 NET_INC_STATS_BH(sock_net(sk), 3342 NET_INC_STATS_BH(sock_net(sk),
3343 LINUX_MIB_TCPLOSSPROBERECOVERY); 3343 LINUX_MIB_TCPLOSSPROBERECOVERY);
3344 } 3344 }
@@ -5751,6 +5751,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5751 } else 5751 } else
5752 tcp_init_metrics(sk); 5752 tcp_init_metrics(sk);
5753 5753
5754 tcp_update_pacing_rate(sk);
5755
5754 /* Prevent spurious tcp_cwnd_restart() on first data packet */ 5756 /* Prevent spurious tcp_cwnd_restart() on first data packet */
5755 tp->lsndtime = tcp_time_stamp; 5757 tp->lsndtime = tcp_time_stamp;
5756 5758
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ce7c4d9d9195..672854664ff5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -986,8 +986,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
986static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, 986static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
987 unsigned int mss_now) 987 unsigned int mss_now)
988{ 988{
989 if (skb->len <= mss_now || !sk_can_gso(sk) || 989 /* Make sure we own this skb before messing gso_size/gso_segs */
990 skb->ip_summed == CHECKSUM_NONE) { 990 WARN_ON_ONCE(skb_cloned(skb));
991
992 if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
991 /* Avoid the costly divide in the normal 993 /* Avoid the costly divide in the normal
992 * non-TSO case. 994 * non-TSO case.
993 */ 995 */
@@ -1067,9 +1069,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1067 if (nsize < 0) 1069 if (nsize < 0)
1068 nsize = 0; 1070 nsize = 0;
1069 1071
1070 if (skb_cloned(skb) && 1072 if (skb_unclone(skb, GFP_ATOMIC))
1071 skb_is_nonlinear(skb) &&
1072 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1073 return -ENOMEM; 1073 return -ENOMEM;
1074 1074
1075 /* Get a new skb... force flag on. */ 1075 /* Get a new skb... force flag on. */
@@ -2344,6 +2344,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2344 int oldpcount = tcp_skb_pcount(skb); 2344 int oldpcount = tcp_skb_pcount(skb);
2345 2345
2346 if (unlikely(oldpcount > 1)) { 2346 if (unlikely(oldpcount > 1)) {
2347 if (skb_unclone(skb, GFP_ATOMIC))
2348 return -ENOMEM;
2347 tcp_init_tso_segs(sk, skb, cur_mss); 2349 tcp_init_tso_segs(sk, skb, cur_mss);
2348 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); 2350 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2349 } 2351 }
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 9a459be24af7..ccde54248c8c 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -107,6 +107,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
107 107
108 memset(fl4, 0, sizeof(struct flowi4)); 108 memset(fl4, 0, sizeof(struct flowi4));
109 fl4->flowi4_mark = skb->mark; 109 fl4->flowi4_mark = skb->mark;
110 fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
110 111
111 if (!ip_is_fragment(iph)) { 112 if (!ip_is_fragment(iph)) {
112 switch (iph->protocol) { 113 switch (iph->protocol) {