path: root/net/ipv6
author	Ville Nuorvala <vnuorval@tcs.hut.fi>	2006-11-24 20:08:32 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-12-03 00:30:26 -0500
commit	6fb32ddeb2502292bb89e17f58600ce265598ed7 (patch)
tree	4870680606e4dad418c52de2cd5a842ce53fed2c /net/ipv6
parent	305d4b3ce8c2f00643edc3bb19f005f72e8f84fc (diff)
[IPV6]: Don't allocate memory for Tunnel Encapsulation Limit Option
Signed-off-by: Ville Nuorvala <vnuorval@tcs.hut.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
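The Tunnel Encapsulation Limit destination option always fits in a fixed eight-byte extension header, which is why the patch below can build it in a small on-stack struct (ipv6_tel_txoption) instead of kzalloc()ing an ipv6_txoptions buffer for every transmitted packet. As a reference for the byte offsets used in init_tel_txopt(), here is a stand-alone user-space sketch of that eight-byte layout; the numeric option types 4 (Tunnel Encapsulation Limit, RFC 2473) and 1 (PadN) stand in for the kernel's IPV6_TLV_TNL_ENCAP_LIMIT and IPV6_TLV_PADN constants, and fill_tel_dstopt() is an illustrative helper, not part of the patch.

/* Sketch of the 8-byte Destination Options header prepared by
 * init_tel_txopt().  Offsets match the patch:
 *
 *   dst_opt[0]  next header   (filled in when the header is pushed)
 *   dst_opt[1]  hdr ext len   (0 => header is 8 octets long)
 *   dst_opt[2]  option type   4 (Tunnel Encapsulation Limit)
 *   dst_opt[3]  option length 1
 *   dst_opt[4]  encapsulation limit value
 *   dst_opt[5]  option type   1 (PadN)
 *   dst_opt[6]  option length 1
 *   dst_opt[7]  one pad octet (zero)
 */
#include <stdio.h>
#include <string.h>

#define TLV_TNL_ENCAP_LIMIT 4	/* stands in for IPV6_TLV_TNL_ENCAP_LIMIT */
#define TLV_PADN            1	/* stands in for IPV6_TLV_PADN */

static void fill_tel_dstopt(unsigned char dst_opt[8], unsigned char encap_limit)
{
	memset(dst_opt, 0, 8);		/* zero next-header, ext-len and padding */
	dst_opt[2] = TLV_TNL_ENCAP_LIMIT;
	dst_opt[3] = 1;			/* TLV carries one data octet */
	dst_opt[4] = encap_limit;
	dst_opt[5] = TLV_PADN;
	dst_opt[6] = 1;			/* one octet of PadN data follows */
}

int main(void)
{
	unsigned char dst_opt[8];
	int i;

	fill_tel_dstopt(dst_opt, 3);	/* e.g. allow three more encapsulations */
	for (i = 0; i < 8; i++)
		printf("%02x ", dst_opt[i]);
	printf("\n");
	return 0;
}

Expected output is "00 00 04 01 03 01 01 00": the next-header and extension-length fields stay zero here (the kernel fills in the next header when ipv6_push_nfrag_opts() prepends the option block), and the single-octet PadN option rounds the header up to the required multiple of eight octets. Because the whole thing is this fixed eight bytes, no per-packet allocation is needed.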
Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/ip6_tunnel.c	58
1 file changed, 22 insertions, 36 deletions
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3944ea24c38c..daad1e5a2a85 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -572,31 +572,23 @@ discard:
 	return 0;
 }
 
-static inline struct ipv6_txoptions *create_tel(__u8 encap_limit)
-{
-	struct ipv6_tlv_tnl_enc_lim *tel;
-	struct ipv6_txoptions *opt;
-	__u8 *raw;
-
-	int opt_len = sizeof(*opt) + 8;
-
-	if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) {
-		return NULL;
-	}
-	opt->tot_len = opt_len;
-	opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1);
-	opt->opt_nflen = 8;
+struct ipv6_tel_txoption {
+	struct ipv6_txoptions ops;
+	__u8 dst_opt[8];
+};
 
-	tel = (struct ipv6_tlv_tnl_enc_lim *) (opt->dst0opt + 1);
-	tel->type = IPV6_TLV_TNL_ENCAP_LIMIT;
-	tel->length = 1;
-	tel->encap_limit = encap_limit;
+static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
+{
+	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
 
-	raw = (__u8 *) opt->dst0opt;
-	raw[5] = IPV6_TLV_PADN;
-	raw[6] = 1;
+	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
+	opt->dst_opt[3] = 1;
+	opt->dst_opt[4] = encap_limit;
+	opt->dst_opt[5] = IPV6_TLV_PADN;
+	opt->dst_opt[6] = 1;
 
-	return opt;
+	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
+	opt->ops.opt_nflen = 8;
 }
 
 /**
@@ -666,8 +658,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net_device_stats *stats = &t->stat;
 	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
-	struct ipv6_txoptions *opt = NULL;
 	int encap_limit = -1;
+	struct ipv6_tel_txoption opt;
 	__u16 offset;
 	struct flowi fl;
 	struct dst_entry *dst;
@@ -696,9 +688,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto tx_err;
 		}
 		encap_limit = tel->encap_limit - 1;
-	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		encap_limit = t->parms.encap_limit;
-	}
+
 	memcpy(&fl, &t->fl, sizeof (fl));
 	proto = fl.proto;
 
@@ -708,9 +700,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
 		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
 
-	if (encap_limit >= 0 && (opt = create_tel(encap_limit)) == NULL)
-		goto tx_err;
-
 	if ((dst = ip6_tnl_dst_check(t)) != NULL)
 		dst_hold(dst);
 	else {
@@ -731,7 +720,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto tx_err_dst_release;
 	}
 	mtu = dst_mtu(dst) - sizeof (*ipv6h);
-	if (opt) {
+	if (encap_limit >= 0) {
 		max_headroom += 8;
 		mtu -= 8;
 	}
@@ -769,9 +758,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	skb->h.raw = skb->nh.raw;
 
-	if (opt)
-		ipv6_push_nfrag_opts(skb, opt, &proto, NULL);
-
+	if (encap_limit >= 0) {
+		init_tel_txopt(&opt, encap_limit);
+		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
+	}
 	skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr));
 	ipv6h = skb->nh.ipv6h;
 	*(__be32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);
@@ -795,9 +785,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		stats->tx_aborted_errors++;
 	}
 	ip6_tnl_dst_store(t, dst);
-
-	kfree(opt);
-
 	t->recursion--;
 	return 0;
 tx_err_link_failure:
@@ -805,7 +792,6 @@ tx_err_link_failure:
 	dst_link_failure(skb);
 tx_err_dst_release:
 	dst_release(dst);
-	kfree(opt);
 tx_err:
 	stats->tx_errors++;
 	stats->tx_dropped++;