diff options
Diffstat (limited to 'net/ipv4/ip_tunnel.c')
-rw-r--r-- | net/ipv4/ip_tunnel.c | 177 |
1 file changed, 79 insertions(+), 98 deletions(-)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 7fa8f08fa7ae..945734b2f209 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -304,6 +304,7 @@ static struct net_device *__ip_tunnel_create(struct net *net, | |||
304 | 304 | ||
305 | tunnel = netdev_priv(dev); | 305 | tunnel = netdev_priv(dev); |
306 | tunnel->parms = *parms; | 306 | tunnel->parms = *parms; |
307 | tunnel->net = net; | ||
307 | 308 | ||
308 | err = register_netdevice(dev); | 309 | err = register_netdevice(dev); |
309 | if (err) | 310 | if (err) |
@@ -408,13 +409,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, | |||
408 | const struct iphdr *iph = ip_hdr(skb); | 409 | const struct iphdr *iph = ip_hdr(skb); |
409 | int err; | 410 | int err; |
410 | 411 | ||
411 | secpath_reset(skb); | ||
412 | |||
413 | skb->protocol = tpi->proto; | ||
414 | |||
415 | skb->mac_header = skb->network_header; | ||
416 | __pskb_pull(skb, tunnel->hlen); | ||
417 | skb_postpull_rcsum(skb, skb_transport_header(skb), tunnel->hlen); | ||
418 | #ifdef CONFIG_NET_IPGRE_BROADCAST | 412 | #ifdef CONFIG_NET_IPGRE_BROADCAST |
419 | if (ipv4_is_multicast(iph->daddr)) { | 413 | if (ipv4_is_multicast(iph->daddr)) { |
420 | /* Looped back packet, drop it! */ | 414 | /* Looped back packet, drop it! */ |
@@ -442,23 +436,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, | |||
442 | tunnel->i_seqno = ntohl(tpi->seq) + 1; | 436 | tunnel->i_seqno = ntohl(tpi->seq) + 1; |
443 | } | 437 | } |
444 | 438 | ||
445 | /* Warning: All skb pointers will be invalidated! */ | ||
446 | if (tunnel->dev->type == ARPHRD_ETHER) { | ||
447 | if (!pskb_may_pull(skb, ETH_HLEN)) { | ||
448 | tunnel->dev->stats.rx_length_errors++; | ||
449 | tunnel->dev->stats.rx_errors++; | ||
450 | goto drop; | ||
451 | } | ||
452 | |||
453 | iph = ip_hdr(skb); | ||
454 | skb->protocol = eth_type_trans(skb, tunnel->dev); | ||
455 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); | ||
456 | } | ||
457 | |||
458 | skb->pkt_type = PACKET_HOST; | ||
459 | __skb_tunnel_rx(skb, tunnel->dev); | ||
460 | |||
461 | skb_reset_network_header(skb); | ||
462 | err = IP_ECN_decapsulate(iph, skb); | 439 | err = IP_ECN_decapsulate(iph, skb); |
463 | if (unlikely(err)) { | 440 | if (unlikely(err)) { |
464 | if (log_ecn_error) | 441 | if (log_ecn_error) |
@@ -477,6 +454,15 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, | |||
477 | tstats->rx_bytes += skb->len; | 454 | tstats->rx_bytes += skb->len; |
478 | u64_stats_update_end(&tstats->syncp); | 455 | u64_stats_update_end(&tstats->syncp); |
479 | 456 | ||
457 | if (tunnel->net != dev_net(tunnel->dev)) | ||
458 | skb_scrub_packet(skb); | ||
459 | |||
460 | if (tunnel->dev->type == ARPHRD_ETHER) { | ||
461 | skb->protocol = eth_type_trans(skb, tunnel->dev); | ||
462 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); | ||
463 | } else { | ||
464 | skb->dev = tunnel->dev; | ||
465 | } | ||
480 | gro_cells_receive(&tunnel->gro_cells, skb); | 466 | gro_cells_receive(&tunnel->gro_cells, skb); |
481 | return 0; | 467 | return 0; |
482 | 468 | ||
@@ -486,24 +472,69 @@ drop: | |||
486 | } | 472 | } |
487 | EXPORT_SYMBOL_GPL(ip_tunnel_rcv); | 473 | EXPORT_SYMBOL_GPL(ip_tunnel_rcv); |
488 | 474 | ||
475 | static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, | ||
476 | struct rtable *rt, __be16 df) | ||
477 | { | ||
478 | struct ip_tunnel *tunnel = netdev_priv(dev); | ||
479 | int pkt_size = skb->len - tunnel->hlen; | ||
480 | int mtu; | ||
481 | |||
482 | if (df) | ||
483 | mtu = dst_mtu(&rt->dst) - dev->hard_header_len | ||
484 | - sizeof(struct iphdr) - tunnel->hlen; | ||
485 | else | ||
486 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; | ||
487 | |||
488 | if (skb_dst(skb)) | ||
489 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); | ||
490 | |||
491 | if (skb->protocol == htons(ETH_P_IP)) { | ||
492 | if (!skb_is_gso(skb) && | ||
493 | (df & htons(IP_DF)) && mtu < pkt_size) { | ||
494 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); | ||
495 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); | ||
496 | return -E2BIG; | ||
497 | } | ||
498 | } | ||
499 | #if IS_ENABLED(CONFIG_IPV6) | ||
500 | else if (skb->protocol == htons(ETH_P_IPV6)) { | ||
501 | struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); | ||
502 | |||
503 | if (rt6 && mtu < dst_mtu(skb_dst(skb)) && | ||
504 | mtu >= IPV6_MIN_MTU) { | ||
505 | if ((tunnel->parms.iph.daddr && | ||
506 | !ipv4_is_multicast(tunnel->parms.iph.daddr)) || | ||
507 | rt6->rt6i_dst.plen == 128) { | ||
508 | rt6->rt6i_flags |= RTF_MODIFIED; | ||
509 | dst_metric_set(skb_dst(skb), RTAX_MTU, mtu); | ||
510 | } | ||
511 | } | ||
512 | |||
513 | if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU && | ||
514 | mtu < pkt_size) { | ||
515 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | ||
516 | return -E2BIG; | ||
517 | } | ||
518 | } | ||
519 | #endif | ||
520 | return 0; | ||
521 | } | ||
522 | |||
489 | void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | 523 | void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
490 | const struct iphdr *tnl_params) | 524 | const struct iphdr *tnl_params, const u8 protocol) |
491 | { | 525 | { |
492 | struct ip_tunnel *tunnel = netdev_priv(dev); | 526 | struct ip_tunnel *tunnel = netdev_priv(dev); |
493 | const struct iphdr *inner_iph; | 527 | const struct iphdr *inner_iph; |
494 | struct iphdr *iph; | ||
495 | struct flowi4 fl4; | 528 | struct flowi4 fl4; |
496 | u8 tos, ttl; | 529 | u8 tos, ttl; |
497 | __be16 df; | 530 | __be16 df; |
498 | struct rtable *rt; /* Route to the other host */ | 531 | struct rtable *rt; /* Route to the other host */ |
499 | struct net_device *tdev; /* Device to other host */ | ||
500 | unsigned int max_headroom; /* The extra header space needed */ | 532 | unsigned int max_headroom; /* The extra header space needed */ |
501 | __be32 dst; | 533 | __be32 dst; |
502 | int mtu; | 534 | int err; |
503 | 535 | ||
504 | inner_iph = (const struct iphdr *)skb_inner_network_header(skb); | 536 | inner_iph = (const struct iphdr *)skb_inner_network_header(skb); |
505 | 537 | ||
506 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); | ||
507 | dst = tnl_params->daddr; | 538 | dst = tnl_params->daddr; |
508 | if (dst == 0) { | 539 | if (dst == 0) { |
509 | /* NBMA tunnel */ | 540 | /* NBMA tunnel */ |
@@ -561,8 +592,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
561 | tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph); | 592 | tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph); |
562 | } | 593 | } |
563 | 594 | ||
564 | rt = ip_route_output_tunnel(dev_net(dev), &fl4, | 595 | rt = ip_route_output_tunnel(tunnel->net, &fl4, |
565 | tunnel->parms.iph.protocol, | 596 | protocol, |
566 | dst, tnl_params->saddr, | 597 | dst, tnl_params->saddr, |
567 | tunnel->parms.o_key, | 598 | tunnel->parms.o_key, |
568 | RT_TOS(tos), | 599 | RT_TOS(tos), |
@@ -571,58 +602,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
571 | dev->stats.tx_carrier_errors++; | 602 | dev->stats.tx_carrier_errors++; |
572 | goto tx_error; | 603 | goto tx_error; |
573 | } | 604 | } |
574 | tdev = rt->dst.dev; | 605 | if (rt->dst.dev == dev) { |
575 | |||
576 | if (tdev == dev) { | ||
577 | ip_rt_put(rt); | 606 | ip_rt_put(rt); |
578 | dev->stats.collisions++; | 607 | dev->stats.collisions++; |
579 | goto tx_error; | 608 | goto tx_error; |
580 | } | 609 | } |
581 | 610 | ||
582 | df = tnl_params->frag_off; | 611 | if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) { |
583 | 612 | ip_rt_put(rt); | |
584 | if (df) | 613 | goto tx_error; |
585 | mtu = dst_mtu(&rt->dst) - dev->hard_header_len | ||
586 | - sizeof(struct iphdr); | ||
587 | else | ||
588 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; | ||
589 | |||
590 | if (skb_dst(skb)) | ||
591 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); | ||
592 | |||
593 | if (skb->protocol == htons(ETH_P_IP)) { | ||
594 | df |= (inner_iph->frag_off&htons(IP_DF)); | ||
595 | |||
596 | if (!skb_is_gso(skb) && | ||
597 | (inner_iph->frag_off&htons(IP_DF)) && | ||
598 | mtu < ntohs(inner_iph->tot_len)) { | ||
599 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); | ||
600 | ip_rt_put(rt); | ||
601 | goto tx_error; | ||
602 | } | ||
603 | } | 614 | } |
604 | #if IS_ENABLED(CONFIG_IPV6) | ||
605 | else if (skb->protocol == htons(ETH_P_IPV6)) { | ||
606 | struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); | ||
607 | |||
608 | if (rt6 && mtu < dst_mtu(skb_dst(skb)) && | ||
609 | mtu >= IPV6_MIN_MTU) { | ||
610 | if ((tunnel->parms.iph.daddr && | ||
611 | !ipv4_is_multicast(tunnel->parms.iph.daddr)) || | ||
612 | rt6->rt6i_dst.plen == 128) { | ||
613 | rt6->rt6i_flags |= RTF_MODIFIED; | ||
614 | dst_metric_set(skb_dst(skb), RTAX_MTU, mtu); | ||
615 | } | ||
616 | } | ||
617 | 615 | ||
618 | if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU && | 616 | if (tunnel->net != dev_net(dev)) |
619 | mtu < skb->len) { | 617 | skb_scrub_packet(skb); |
620 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | ||
621 | ip_rt_put(rt); | ||
622 | goto tx_error; | ||
623 | } | ||
624 | } | ||
625 | #endif | ||
626 | 618 | ||
627 | if (tunnel->err_count > 0) { | 619 | if (tunnel->err_count > 0) { |
628 | if (time_before(jiffies, | 620 | if (time_before(jiffies, |
@@ -646,8 +638,12 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
646 | ttl = ip4_dst_hoplimit(&rt->dst); | 638 | ttl = ip4_dst_hoplimit(&rt->dst); |
647 | } | 639 | } |
648 | 640 | ||
649 | max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr) | 641 | df = tnl_params->frag_off; |
650 | + rt->dst.header_len; | 642 | if (skb->protocol == htons(ETH_P_IP)) |
643 | df |= (inner_iph->frag_off&htons(IP_DF)); | ||
644 | |||
645 | max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) | ||
646 | + rt->dst.header_len; | ||
651 | if (max_headroom > dev->needed_headroom) { | 647 | if (max_headroom > dev->needed_headroom) { |
652 | dev->needed_headroom = max_headroom; | 648 | dev->needed_headroom = max_headroom; |
653 | if (skb_cow_head(skb, dev->needed_headroom)) { | 649 | if (skb_cow_head(skb, dev->needed_headroom)) { |
@@ -657,27 +653,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
657 | } | 653 | } |
658 | } | 654 | } |
659 | 655 | ||
660 | skb_dst_drop(skb); | 656 | err = iptunnel_xmit(dev_net(dev), rt, skb, |
661 | skb_dst_set(skb, &rt->dst); | 657 | fl4.saddr, fl4.daddr, protocol, |
662 | 658 | ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df); | |
663 | /* Push down and install the IP header. */ | 659 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); |
664 | skb_push(skb, sizeof(struct iphdr)); | ||
665 | skb_reset_network_header(skb); | ||
666 | |||
667 | iph = ip_hdr(skb); | ||
668 | inner_iph = (const struct iphdr *)skb_inner_network_header(skb); | ||
669 | 660 | ||
670 | iph->version = 4; | ||
671 | iph->ihl = sizeof(struct iphdr) >> 2; | ||
672 | iph->frag_off = df; | ||
673 | iph->protocol = tnl_params->protocol; | ||
674 | iph->tos = ip_tunnel_ecn_encap(tos, inner_iph, skb); | ||
675 | iph->daddr = fl4.daddr; | ||
676 | iph->saddr = fl4.saddr; | ||
677 | iph->ttl = ttl; | ||
678 | tunnel_ip_select_ident(skb, inner_iph, &rt->dst); | ||
679 | |||
680 | iptunnel_xmit(skb, dev); | ||
681 | return; | 661 | return; |
682 | 662 | ||
683 | #if IS_ENABLED(CONFIG_IPV6) | 663 | #if IS_ENABLED(CONFIG_IPV6) |
@@ -926,6 +906,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], | |||
926 | if (ip_tunnel_find(itn, p, dev->type)) | 906 | if (ip_tunnel_find(itn, p, dev->type)) |
927 | return -EEXIST; | 907 | return -EEXIST; |
928 | 908 | ||
909 | nt->net = net; | ||
929 | nt->parms = *p; | 910 | nt->parms = *p; |
930 | err = register_netdevice(dev); | 911 | err = register_netdevice(dev); |
931 | if (err) | 912 | if (err) |