aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPravin B Shelar <pshelar@nicira.com>2013-07-02 13:57:33 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2013-07-28 19:29:49 -0400
commitedb42cad3392ba6a1cd3a688cfb7af16d6b2137f (patch)
treea9501831534238b5d9158d5d93eeecd68fafbd28
parent36bddbad5049d3f3916abc9f594526d47ad3ab84 (diff)
ip_tunnels: Use skb-len to PMTU check.
[ Upstream commit 23a3647bc4f93bac3776c66dc2c7f7f68b3cd662 ] In the path MTU check, the IP header total length works for a gre device but not for a gre-tap device. Use skb->len, which is consistent for all tunneling types. This is an old bug in gre. This also fixes an MTU calculation bug introduced by commit c54419321455631079c7d ("GRE: Refactor GRE tunneling code"). Reported-by: Timo Teras <timo.teras@iki.fi> Signed-off-by: Pravin B Shelar <pshelar@nicira.com> Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- net/ipv4/ip_tunnel.c | 97
1 file changed, 54 insertions(+), 43 deletions(-)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 7fa8f08fa7ae..d05bd02886ed 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -486,6 +486,53 @@ drop:
486} 486}
487EXPORT_SYMBOL_GPL(ip_tunnel_rcv); 487EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
488 488
489static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
490 struct rtable *rt, __be16 df)
491{
492 struct ip_tunnel *tunnel = netdev_priv(dev);
493 int pkt_size = skb->len - tunnel->hlen;
494 int mtu;
495
496 if (df)
497 mtu = dst_mtu(&rt->dst) - dev->hard_header_len
498 - sizeof(struct iphdr) - tunnel->hlen;
499 else
500 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
501
502 if (skb_dst(skb))
503 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
504
505 if (skb->protocol == htons(ETH_P_IP)) {
506 if (!skb_is_gso(skb) &&
507 (df & htons(IP_DF)) && mtu < pkt_size) {
508 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
509 return -E2BIG;
510 }
511 }
512#if IS_ENABLED(CONFIG_IPV6)
513 else if (skb->protocol == htons(ETH_P_IPV6)) {
514 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
515
516 if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
517 mtu >= IPV6_MIN_MTU) {
518 if ((tunnel->parms.iph.daddr &&
519 !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
520 rt6->rt6i_dst.plen == 128) {
521 rt6->rt6i_flags |= RTF_MODIFIED;
522 dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
523 }
524 }
525
526 if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
527 mtu < pkt_size) {
528 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
529 return -E2BIG;
530 }
531 }
532#endif
533 return 0;
534}
535
489void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, 536void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
490 const struct iphdr *tnl_params) 537 const struct iphdr *tnl_params)
491{ 538{
@@ -499,7 +546,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
499 struct net_device *tdev; /* Device to other host */ 546 struct net_device *tdev; /* Device to other host */
500 unsigned int max_headroom; /* The extra header space needed */ 547 unsigned int max_headroom; /* The extra header space needed */
501 __be32 dst; 548 __be32 dst;
502 int mtu;
503 549
504 inner_iph = (const struct iphdr *)skb_inner_network_header(skb); 550 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
505 551
@@ -579,50 +625,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
579 goto tx_error; 625 goto tx_error;
580 } 626 }
581 627
582 df = tnl_params->frag_off;
583 628
584 if (df) 629 if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
585 mtu = dst_mtu(&rt->dst) - dev->hard_header_len 630 ip_rt_put(rt);
586 - sizeof(struct iphdr); 631 goto tx_error;
587 else
588 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
589
590 if (skb_dst(skb))
591 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
592
593 if (skb->protocol == htons(ETH_P_IP)) {
594 df |= (inner_iph->frag_off&htons(IP_DF));
595
596 if (!skb_is_gso(skb) &&
597 (inner_iph->frag_off&htons(IP_DF)) &&
598 mtu < ntohs(inner_iph->tot_len)) {
599 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
600 ip_rt_put(rt);
601 goto tx_error;
602 }
603 }
604#if IS_ENABLED(CONFIG_IPV6)
605 else if (skb->protocol == htons(ETH_P_IPV6)) {
606 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
607
608 if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
609 mtu >= IPV6_MIN_MTU) {
610 if ((tunnel->parms.iph.daddr &&
611 !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
612 rt6->rt6i_dst.plen == 128) {
613 rt6->rt6i_flags |= RTF_MODIFIED;
614 dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
615 }
616 }
617
618 if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
619 mtu < skb->len) {
620 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
621 ip_rt_put(rt);
622 goto tx_error;
623 }
624 } 632 }
625#endif
626 633
627 if (tunnel->err_count > 0) { 634 if (tunnel->err_count > 0) {
628 if (time_before(jiffies, 635 if (time_before(jiffies,
@@ -646,6 +653,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
646 ttl = ip4_dst_hoplimit(&rt->dst); 653 ttl = ip4_dst_hoplimit(&rt->dst);
647 } 654 }
648 655
656 df = tnl_params->frag_off;
657 if (skb->protocol == htons(ETH_P_IP))
658 df |= (inner_iph->frag_off&htons(IP_DF));
659
649 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr) 660 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr)
650 + rt->dst.header_len; 661 + rt->dst.header_len;
651 if (max_headroom > dev->needed_headroom) { 662 if (max_headroom > dev->needed_headroom) {