author     Florian Westphal <fw@strlen.de>         2014-02-13 17:09:12 -0500
committer  David S. Miller <davem@davemloft.net>   2014-02-13 17:17:02 -0500
commit     fe6cc55f3a9a053482a76f5a6b2257cee51b4663 (patch)
tree       70617e150766dea911320b3bb06bca5642c340a0 /net
parent     d206940319c41df4299db75ed56142177bb2e5f6 (diff)
net: ip, ipv6: handle gso skbs in forwarding path
Marcelo Ricardo Leitner reported problems when the forwarding link path has a lower mtu than the incoming one if the inbound interface supports GRO.

Given:

    Host <mtu1500> R1 <mtu1200> R2

Host sends a tcp stream which is routed via R1 and R2. R1 performs GRO.

In this case, the kernel will fail to send ICMP fragmentation needed messages (or pkt too big for ipv6), as GSO packets currently bypass the dstmtu checks in the forward path. Instead, Linux tries to send out packets exceeding the mtu.

When locking the route MTU on Host (i.e., no ipv4 DF bit set), R1 does not fragment the packets when forwarding, and again tries to send out packets exceeding the R1-R2 link mtu.

This alters the forwarding dstmtu checks to take the individual gso segment lengths into account.

For ipv6, we send out a pkt too big error for gso if the individual segments are too big.

For ipv4, we either send icmp fragmentation needed, or, if the DF bit is not set, perform software segmentation and let the output path create fragments when the packet is leaving the machine. It is not 100% correct, as the error message will contain the headers of the GRO skb instead of the original/segmented one, but it seems to work fine in my (limited) tests.

Eric Dumazet suggested simply shrinking the mss via ->gso_size to avoid software segmentation. However, it turns out that skb_segment() assumes the skb's nr_frags is related to the mss size, so we would BUG there. I don't want to mess with it, considering Herbert and Eric disagree on what the correct behavior should be. Hannes Frederic Sowa notes that if we were to shrink gso_size, skb_segment() would also need to deal with the case where SKB_MAX_FRAGS is exceeded.

This patch uses software segmentation in the forward path when we hit ipv4 non-DF packets and the outgoing link mtu is too small. It's not perfect, but given the lack of bug reports about GRO forwarding being broken, this is a rare case anyway. It can also be improved later once the dust settles.

Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Reported-by: Marcelo Ricardo Leitner <mleitner@redhat.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
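The decisive quantity in the checks below is the on-the-wire length of a single GSO segment, not the length of the aggregated skb; the diff relies on skb_gso_network_seglen() for this. A runnable back-of-the-envelope illustration, with assumed header sizes and MSS picked to match the topology above:

#include <stdio.h>

/* Illustration only: one wire segment of a TCP GSO skb is roughly
 * network header + TCP header + gso_size (the MSS). All numbers are
 * assumptions chosen to match the Host/R1/R2 example. */
int main(void)
{
	const unsigned int net_hdr_len = 20;   /* IPv4 header, no options */
	const unsigned int tcp_hdr_len = 20;   /* TCP header, no options */
	const unsigned int gso_size    = 1460; /* per-segment payload (MSS) */
	const unsigned int link_mtu    = 1200; /* the R1-R2 link */

	unsigned int seglen = net_hdr_len + tcp_hdr_len + gso_size;

	/* 20 + 20 + 1460 = 1500 > 1200: each segment exceeds the link mtu,
	 * so R1 must send an ICMP error or software-segment the skb. */
	printf("seglen %u vs mtu %u -> %s\n", seglen, link_mtu,
	       seglen > link_mtu ? "too big" : "fits");
	return 0;
}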
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/ip_forward.c  71
-rw-r--r--  net/ipv6/ip6_output.c  17
2 files changed, 84 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e9f1217a8afd..f3869c186d97 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,6 +39,71 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 
+static bool ip_may_fragment(const struct sk_buff *skb)
+{
+	return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+	       !skb->local_df;
+}
+
+static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu || skb->local_df)
+		return false;
+
+	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+		return false;
+
+	return true;
+}
+
+static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
+{
+	unsigned int mtu;
+
+	if (skb->local_df || !skb_is_gso(skb))
+		return false;
+
+	mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
+
+	/* if seglen > mtu, do software segmentation for IP fragmentation on
+	 * output.  DF bit cannot be set since ip_forward would have sent
+	 * icmp error.
+	 */
+	return skb_gso_network_seglen(skb) > mtu;
+}
+
+/* called if GSO skb needs to be fragmented on forward */
+static int ip_forward_finish_gso(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	netdev_features_t features;
+	struct sk_buff *segs;
+	int ret = 0;
+
+	features = netif_skb_dev_features(skb, dst->dev);
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+	if (IS_ERR(segs)) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	consume_skb(skb);
+
+	do {
+		struct sk_buff *nskb = segs->next;
+		int err;
+
+		segs->next = NULL;
+		err = dst_output(segs);
+
+		if (err && ret == 0)
+			ret = err;
+		segs = nskb;
+	} while (segs);
+
+	return ret;
+}
+
 static int ip_forward_finish(struct sk_buff *skb)
 {
 	struct ip_options *opt = &(IPCB(skb)->opt);
@@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
+	if (ip_gso_exceeds_dst_mtu(skb))
+		return ip_forward_finish_gso(skb);
+
 	return dst_output(skb);
 }
 
@@ -91,8 +159,7 @@ int ip_forward(struct sk_buff *skb)
 
 	IPCB(skb)->flags |= IPSKB_FORWARDED;
 	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
-	if (unlikely(skb->len > mtu && !skb_is_gso(skb) &&
-		     (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
+	if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
 		IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 			  htonl(mtu));
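To see how the two new predicates combine in ip_forward(), here is a small self-contained userspace mock (the struct and values are hypothetical stand-ins for kernel skb state, not kernel code) walking the interesting cases against a 1200-byte link:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the skb state the kernel checks. */
struct mock_skb {
	unsigned int len;    /* total length of the (possibly GRO'd) skb */
	unsigned int seglen; /* network seglen of one GSO segment, 0 if not GSO */
	bool df;             /* IPv4 DF bit set */
	bool local_df;       /* fragmentation allowed regardless of DF */
};

/* Mirrors ip_may_fragment(): DF clear or local_df set. */
static bool may_fragment(const struct mock_skb *skb)
{
	return !skb->df || skb->local_df;
}

/* Mirrors ip_exceeds_mtu(): GSO skbs are judged per segment. */
static bool exceeds_mtu(const struct mock_skb *skb, unsigned int mtu)
{
	if (skb->len <= mtu || skb->local_df)
		return false;
	if (skb->seglen && skb->seglen <= mtu)
		return false;
	return true;
}

int main(void)
{
	const unsigned int mtu = 1200;
	const struct mock_skb cases[] = {
		{ .len = 16000, .seglen = 1500, .df = true  }, /* GRO'd, DF set */
		{ .len = 16000, .seglen = 1500, .df = false }, /* GRO'd, DF clear */
		{ .len = 16000, .seglen = 1100, .df = true  }, /* segments fit */
		{ .len =  1000, .seglen = 0,    .df = true  }, /* small, non-GSO */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		const struct mock_skb *s = &cases[i];

		if (!may_fragment(s) && exceeds_mtu(s, mtu))
			printf("case %u: send ICMP_FRAG_NEEDED\n", i);
		else if (s->seglen > mtu) /* ip_gso_exceeds_dst_mtu() analogue */
			printf("case %u: software-segment, fragment on output\n", i);
		else
			printf("case %u: forward as-is\n", i);
	}
	return 0;
}

This prints the ICMP action for case 0, software segmentation for case 1, and plain forwarding for cases 2 and 3, matching the behavior described in the commit message.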
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ef02b26ccf81..070a2fae2375 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -342,6 +342,20 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 	return mtu;
 }
 
+static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu || skb->local_df)
+		return false;
+
+	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
+		return true;
+
+	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+		return false;
+
+	return true;
+}
+
 int ip6_forward(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
@@ -466,8 +480,7 @@ int ip6_forward(struct sk_buff *skb)
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 
-	if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
-	    (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
+	if (ip6_pkt_too_big(skb, mtu)) {
 		/* Again, force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
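The IPv6 side needs no segmentation fallback: IPv6 routers never fragment packets in flight, so an oversized segment always results in a Packet Too Big error. A matching userspace mock of the consolidated ip6_pkt_too_big() predicate (again with a hypothetical stand-in type):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in; frag_max_size records the largest fragment
 * seen when netfilter reassembled the packet, 0 otherwise. */
struct mock_skb6 {
	unsigned int len;
	unsigned int frag_max_size;
	unsigned int seglen; /* network seglen of one GSO segment, 0 if not GSO */
	bool local_df;
};

static bool pkt_too_big(const struct mock_skb6 *skb, unsigned int mtu)
{
	if (skb->len <= mtu || skb->local_df)
		return false;
	if (skb->frag_max_size && skb->frag_max_size > mtu)
		return true;
	if (skb->seglen && skb->seglen <= mtu)
		return false;
	return true;
}

int main(void)
{
	const struct mock_skb6 gro = { .len = 16000, .seglen = 1500 };

	/* GRO'd stream hitting a 1280-byte link: expect "too big: 1",
	 * i.e. ip6_forward() would emit ICMPV6_PKT_TOOBIG. */
	printf("too big: %d\n", pkt_too_big(&gro, 1280));
	return 0;
}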