author		Pravin B Shelar <pshelar@nicira.com>	2013-07-02 13:57:33 -0400
committer	David S. Miller <davem@davemloft.net>	2013-07-02 19:43:35 -0400
commit		23a3647bc4f93bac3776c66dc2c7f7f68b3cd662
tree		40dba500db5b02a9e04c6d7bb64a69c60c7f7e88 /net/ipv4
parent		784771e74b9207b0dd9e1f3b04729e6356c88650
ip_tunnels: Use skb->len for the PMTU check.
In the path MTU check, the IP header total length works for a gre
device but not for a gre-tap device. Use skb->len, which is consistent
across all tunneling types. This is an old bug in gre.

This also fixes an MTU calculation bug introduced by commit
c54419321455631079c7d ("GRE: Refactor GRE tunneling code").

Reported-by: Timo Teras <timo.teras@iki.fi>
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
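The gre-tap case makes the off-by-a-header problem concrete: the skb handed to the tunnel transmit path carries a full inner Ethernet frame, so ntohs(inner_iph->tot_len) misses the inner Ethernet header, while skb->len counts every byte that must fit inside the tunnel MTU. Below is a minimal standalone C sketch of the arithmetic, not part of the patch; the 14-byte Ethernet header, 1470-byte inner datagram, and 1476-byte tunnel MTU are illustrative assumptions.

#include <stdio.h>

#define ETH_HLEN 14			/* inner Ethernet header (assumed) */

int main(void)
{
	int inner_tot_len = 1470;	/* ntohs(inner_iph->tot_len), assumed */
	int skb_len = ETH_HLEN + inner_tot_len;	/* what gre-tap must carry */
	int tunnel_mtu = 1476;		/* example path MTU inside the tunnel */

	/* Old check: only the inner IPv4 datagram is measured, so the
	 * 1484-byte frame below wrongly passes the 1476-byte limit. */
	printf("tot_len  check: %s\n",
	       inner_tot_len > tunnel_mtu ? "too big" : "fits");

	/* New check: skb->len counts every byte to be encapsulated. */
	printf("skb->len check: %s\n",
	       skb_len > tunnel_mtu ? "too big" : "fits");
	return 0;
}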
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/ip_tunnel.c	| 99
1 file changed, 55 insertions(+), 44 deletions(-)
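For intuition about the mtu value the new tnl_update_pmtu() helper derives when DF is set, here is a back-of-the-envelope sketch, not taken from the patch; the 1500-byte underlay MTU, zero hard_header_len, and 4-byte base GRE header are illustrative assumptions.

#include <stdio.h>

int main(void)
{
	/* Illustrative assumptions, not values from the patch: */
	int dst_mtu = 1500;		/* dst_mtu(&rt->dst): underlay Ethernet */
	int hard_header_len = 0;	/* dev->hard_header_len on a gre device */
	int iphdr_len = 20;		/* sizeof(struct iphdr), no options */
	int tnl_hlen = 4;		/* tunnel->hlen: base GRE header */

	/* Mirrors the df branch of tnl_update_pmtu():
	 * mtu = dst_mtu(&rt->dst) - dev->hard_header_len
	 *			   - sizeof(struct iphdr) - tunnel->hlen; */
	int mtu = dst_mtu - hard_header_len - iphdr_len - tnl_hlen;

	printf("room left for the inner packet: %d bytes\n", mtu); /* 1476 */
	return 0;
}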
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 394cebc96d22..945734b2f209 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -472,6 +472,54 @@ drop:
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
 
+static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+			    struct rtable *rt, __be16 df)
+{
+	struct ip_tunnel *tunnel = netdev_priv(dev);
+	int pkt_size = skb->len - tunnel->hlen;
+	int mtu;
+
+	if (df)
+		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
+					- sizeof(struct iphdr) - tunnel->hlen;
+	else
+		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+
+	if (skb_dst(skb))
+		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		if (!skb_is_gso(skb) &&
+		    (df & htons(IP_DF)) && mtu < pkt_size) {
+			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+			return -E2BIG;
+		}
+	}
+#if IS_ENABLED(CONFIG_IPV6)
+	else if (skb->protocol == htons(ETH_P_IPV6)) {
+		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
+
+		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
+			   mtu >= IPV6_MIN_MTU) {
+			if ((tunnel->parms.iph.daddr &&
+			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
+			    rt6->rt6i_dst.plen == 128) {
+				rt6->rt6i_flags |= RTF_MODIFIED;
+				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
+			}
+		}
+
+		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
+		    mtu < pkt_size) {
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+			return -E2BIG;
+		}
+	}
+#endif
+	return 0;
+}
+
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		    const struct iphdr *tnl_params, const u8 protocol)
 {
@@ -483,7 +531,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	struct rtable *rt;		/* Route to the other host */
 	unsigned int max_headroom;	/* The extra header space needed */
 	__be32 dst;
-	int mtu;
 	int err;
 
 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
@@ -560,51 +607,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		dev->stats.collisions++;
 		goto tx_error;
 	}
-	df = tnl_params->frag_off;
 
-	if (df)
-		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
-					- sizeof(struct iphdr);
-	else
-		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
-
-	if (skb_dst(skb))
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-
-	if (skb->protocol == htons(ETH_P_IP)) {
-		df |= (inner_iph->frag_off&htons(IP_DF));
-
-		if (!skb_is_gso(skb) &&
-		    (inner_iph->frag_off&htons(IP_DF)) &&
-		    mtu < ntohs(inner_iph->tot_len)) {
-			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
-			ip_rt_put(rt);
-			goto tx_error;
-		}
-	}
-#if IS_ENABLED(CONFIG_IPV6)
-	else if (skb->protocol == htons(ETH_P_IPV6)) {
-		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
-
-		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
-		    mtu >= IPV6_MIN_MTU) {
-			if ((tunnel->parms.iph.daddr &&
-			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
-			    rt6->rt6i_dst.plen == 128) {
-				rt6->rt6i_flags |= RTF_MODIFIED;
-				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
-			}
-		}
-
-		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
-		    mtu < skb->len) {
-			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-			ip_rt_put(rt);
-			goto tx_error;
-		}
-	}
-#endif
+	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
+		ip_rt_put(rt);
+		goto tx_error;
+	}
 
 	if (tunnel->net != dev_net(dev))
 		skb_scrub_packet(skb);
@@ -631,6 +638,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		ttl = ip4_dst_hoplimit(&rt->dst);
 	}
 
+	df = tnl_params->frag_off;
+	if (skb->protocol == htons(ETH_P_IP))
+		df |= (inner_iph->frag_off&htons(IP_DF));
+
 	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
 			+ rt->dst.header_len;
 	if (max_headroom > dev->needed_headroom) {