author	Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>	2006-11-05 08:56:45 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:23:39 -0400
commit	61ec2aec28ba8de09f76a558a5d6d3893b1d2e47 (patch)
tree	50499ae5687150205a324fe3d79c4b022f6e3d67
parent	8359925be8bb5960f614e3f25454f3ef7cc9df65 (diff)
[IPV6] IP6TUNNEL: Split out generic routine in ip6ip6_xmit().
This enables adding IPv4/IPv6 specific handling later.

Signed-off-by: Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/ipv6/ip6_tunnel.c	143
1 file changed, 98 insertions(+), 45 deletions(-)
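After this patch, ip6_tnl_xmit() becomes the device's hard_start_xmit entry point: it handles the recursion guard and dispatches on skb->protocol, while protocol-specific setup (DSCP, flow label, encapsulation limit) lives in a per-protocol wrapper that calls the generic ip6_tnl_xmit2(). As the commit message notes, this prepares for IPv4-specific handling. A minimal sketch of how a later IPv4 case could slot into the new dispatch switch; ip4ip6_tnl_xmit() is a hypothetical follow-up helper, not part of this commit:

	/* Hypothetical later addition: IPv4-in-IPv6 transmit hooked into
	 * the dispatch switch introduced by this patch. ip4ip6_tnl_xmit()
	 * is assumed here; it would mirror ip6ip6_tnl_xmit(), deriving
	 * dsfield and flow parameters from the inner IPv4 header before
	 * calling the generic ip6_tnl_xmit2().
	 */
	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):	/* assumed follow-up case */
		ret = ip4ip6_tnl_xmit(skb, dev);
		break;
	case __constant_htons(ETH_P_IPV6):
		ret = ip6ip6_tnl_xmit(skb, dev);
		break;
	default:
		goto tx_err;
	}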
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 985d106dff6d..4546bb923a20 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -678,9 +678,13 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
 	return ret;
 }
 /**
- * ip6ip6_tnl_xmit - encapsulate packet and send
+ * ip6_tnl_xmit2 - encapsulate packet and send
  * @skb: the outgoing socket buffer
  * @dev: the outgoing tunnel device
+ * @dsfield: dscp code for outer header
+ * @fl: flow of tunneled packet
+ * @encap_limit: encapsulation limit
+ * @pmtu: Path MTU is stored if packet is too big
  *
  * Description:
  *   Build new header and do some sanity checks on the packet before sending
@@ -688,62 +692,35 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
  *
  * Return:
  *   0
+ *   -1 fail
+ *   %-EMSGSIZE message too big. return mtu in this case.
  **/
 
-static int
-ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ip6_tnl_xmit2(struct sk_buff *skb,
+			 struct net_device *dev,
+			 __u8 dsfield,
+			 struct flowi *fl,
+			 int encap_limit,
+			 __u32 *pmtu)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net_device_stats *stats = &t->stat;
 	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
-	int encap_limit = -1;
 	struct ipv6_tel_txoption opt;
-	__u16 offset;
-	struct flowi fl;
 	struct dst_entry *dst;
 	struct net_device *tdev;
 	int mtu;
 	int max_headroom = sizeof(struct ipv6hdr);
 	u8 proto;
-	int err;
+	int err = -1;
 	int pkt_len;
-	int dsfield;
-
-	if (t->recursion++) {
-		stats->collisions++;
-		goto tx_err;
-	}
-	if (skb->protocol != htons(ETH_P_IPV6) ||
-	    !ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
-		goto tx_err;
-
-	if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
-		struct ipv6_tlv_tnl_enc_lim *tel;
-		tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
-		if (tel->encap_limit == 0) {
-			icmpv6_send(skb, ICMPV6_PARAMPROB,
-				    ICMPV6_HDR_FIELD, offset + 2, skb->dev);
-			goto tx_err;
-		}
-		encap_limit = tel->encap_limit - 1;
-	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl, &t->fl, sizeof (fl));
-	proto = fl.proto;
-
-	dsfield = ipv6_get_dsfield(ipv6h);
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
-		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
-	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
-		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
 
 	if ((dst = ip6_tnl_dst_check(t)) != NULL)
 		dst_hold(dst);
 	else {
-		dst = ip6_route_output(NULL, &fl);
+		dst = ip6_route_output(NULL, fl);
 
-		if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0) < 0)
+		if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0)
 			goto tx_err_link_failure;
 	}
 
@@ -767,7 +744,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (skb->dst)
 		skb->dst->ops->update_pmtu(skb->dst, mtu);
 	if (skb->len > mtu) {
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+		*pmtu = mtu;
+		err = -EMSGSIZE;
 		goto tx_err_dst_release;
 	}
 
@@ -793,20 +771,21 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	skb->h.raw = skb->nh.raw;
 
+	proto = fl->proto;
 	if (encap_limit >= 0) {
 		init_tel_txopt(&opt, encap_limit);
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
 	}
 	skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr));
 	ipv6h = skb->nh.ipv6h;
-	*(__be32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);
+	*(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000);
 	dsfield = INET_ECN_encapsulate(0, dsfield);
 	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
 	ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 	ipv6h->hop_limit = t->parms.hop_limit;
 	ipv6h->nexthdr = proto;
-	ipv6_addr_copy(&ipv6h->saddr, &fl.fl6_src);
-	ipv6_addr_copy(&ipv6h->daddr, &fl.fl6_dst);
+	ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src);
+	ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst);
 	nf_reset(skb);
 	pkt_len = skb->len;
 	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL,
@@ -820,13 +799,87 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		stats->tx_aborted_errors++;
 	}
 	ip6_tnl_dst_store(t, dst);
-	t->recursion--;
 	return 0;
 tx_err_link_failure:
 	stats->tx_carrier_errors++;
 	dst_link_failure(skb);
 tx_err_dst_release:
 	dst_release(dst);
+	return err;
+}
+
+static inline int
+ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
+	int encap_limit = -1;
+	__u16 offset;
+	struct flowi fl;
+	__u8 dsfield;
+	__u32 mtu;
+	int err;
+
+	if (!ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
+		return -1;
+
+	if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
+		struct ipv6_tlv_tnl_enc_lim *tel;
+		tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
+		if (tel->encap_limit == 0) {
+			icmpv6_send(skb, ICMPV6_PARAMPROB,
+				    ICMPV6_HDR_FIELD, offset + 2, skb->dev);
+			return -1;
+		}
+		encap_limit = tel->encap_limit - 1;
+	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		encap_limit = t->parms.encap_limit;
+
+	memcpy(&fl, &t->fl, sizeof (fl));
+	fl.proto = IPPROTO_IPV6;
+
+	dsfield = ipv6_get_dsfield(ipv6h);
+	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
+		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+
+	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
+	if (err != 0) {
+		if (err == -EMSGSIZE)
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct net_device_stats *stats = &t->stat;
+	int ret;
+
+	if (t->recursion++) {
+		t->stat.collisions++;
+		goto tx_err;
+	}
+
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_IPV6):
+		ret = ip6ip6_tnl_xmit(skb, dev);
+		break;
+	default:
+		goto tx_err;
+	}
+
+	if (ret < 0)
+		goto tx_err;
+
+	t->recursion--;
+	return 0;
+
 tx_err:
 	stats->tx_errors++;
 	stats->tx_dropped++;
@@ -1088,7 +1141,7 @@ static void ip6ip6_tnl_dev_setup(struct net_device *dev)
 	SET_MODULE_OWNER(dev);
 	dev->uninit = ip6ip6_tnl_dev_uninit;
 	dev->destructor = free_netdev;
-	dev->hard_start_xmit = ip6ip6_tnl_xmit;
+	dev->hard_start_xmit = ip6_tnl_xmit;
 	dev->get_stats = ip6ip6_tnl_get_stats;
 	dev->do_ioctl = ip6ip6_tnl_ioctl;
 	dev->change_mtu = ip6ip6_tnl_change_mtu;