path: root/net/ipv6/ip6mr.c
author     Patrick McHardy <kaber@trash.net>   2010-05-11 08:40:50 -0400
committer  Patrick McHardy <kaber@trash.net>   2010-05-11 08:40:50 -0400
commit     b5aa30b19121de49021fba57aa1f6e4c787fcf67 (patch)
tree       dbbf01015bda08d52750f37a797d49c7db1990a9 /net/ipv6/ip6mr.c
parent     c476efbcde5ba58b81ac752f4a894d6db8e17d94 (diff)
ipv6: ip6mr: remove net pointer from struct mfc6_cache
Now that cache entries in unres_queue don't need to be distinguished by their network namespace pointer anymore, we can remove it from struct mfc6_cache and pass the namespace as a function argument to the functions that need it.

Signed-off-by: Patrick McHardy <kaber@trash.net>
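In essence, the change turns an implicit per-entry back-pointer into an explicit argument. A minimal sketch of that pattern follows; the names (struct ctx, entry_old, entry_new, destroy_*) are purely hypothetical and are not the kernel code, which operates on struct mfc6_cache and struct net as shown in the diff below.

#include <stdlib.h>

/* Hypothetical illustration only: before the change, each entry
 * carries a pointer to its owning context; after it, the caller
 * passes the context explicitly and the entry shrinks. */
struct ctx { int queue_len; };

/* Before: entry stores a back-pointer to its context. */
struct entry_old {
        struct ctx *ctx;
        int data;
};

static void destroy_old(struct entry_old *e)
{
        e->ctx->queue_len--;    /* context recovered from the entry itself */
        free(e);
}

/* After: entry no longer stores the context. */
struct entry_new {
        int data;
};

static void destroy_new(struct ctx *ctx, struct entry_new *e)
{
        ctx->queue_len--;       /* context supplied by the caller */
        free(e);
}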
Diffstat (limited to 'net/ipv6/ip6mr.c')
-rw-r--r--   net/ipv6/ip6mr.c   63
1 file changed, 31 insertions(+), 32 deletions(-)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 7236030e403e..b3783a436bbd 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -76,10 +76,12 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 
 static struct kmem_cache *mrt_cachep __read_mostly;
 
-static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
+static int ip6_mr_forward(struct net *net, struct sk_buff *skb,
+                          struct mfc6_cache *cache);
 static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt,
                               mifi_t mifi, int assert);
-static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
+static int ip6mr_fill_mroute(struct net *net, struct sk_buff *skb,
+                             struct mfc6_cache *c, struct rtmsg *rtm);
 static void mroute_clean_tables(struct net *net);
 
 
@@ -523,7 +525,6 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head)
 
 static inline void ip6mr_cache_free(struct mfc6_cache *c)
 {
-        release_net(mfc6_net(c));
         kmem_cache_free(mrt_cachep, c);
 }
 
@@ -531,10 +532,9 @@ static inline void ip6mr_cache_free(struct mfc6_cache *c)
    and reporting error to netlink readers.
  */
 
-static void ip6mr_destroy_unres(struct mfc6_cache *c)
+static void ip6mr_destroy_unres(struct net *net, struct mfc6_cache *c)
 {
         struct sk_buff *skb;
-        struct net *net = mfc6_net(c);
 
         atomic_dec(&net->ipv6.cache_resolve_queue_len);
 
@@ -575,7 +575,7 @@ static void ipmr_do_expire_process(struct net *net)
                 }
 
                 *cp = c->next;
-                ip6mr_destroy_unres(c);
+                ip6mr_destroy_unres(net, c);
         }
 
         if (net->ipv6.mfc6_unres_queue != NULL)
@@ -599,10 +599,10 @@ static void ipmr_expire_process(unsigned long arg)
 
 /* Fill oifs list. It is called under write locked mrt_lock. */
 
-static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
+static void ip6mr_update_thresholds(struct net *net, struct mfc6_cache *cache,
+                                    unsigned char *ttls)
 {
         int vifi;
-        struct net *net = mfc6_net(cache);
 
         cache->mfc_un.res.minvif = MAXMIFS;
         cache->mfc_un.res.maxvif = 0;
@@ -717,24 +717,22 @@ static struct mfc6_cache *ip6mr_cache_find(struct net *net,
 /*
  *      Allocate a multicast cache entry
  */
-static struct mfc6_cache *ip6mr_cache_alloc(struct net *net)
+static struct mfc6_cache *ip6mr_cache_alloc(void)
 {
         struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
         if (c == NULL)
                 return NULL;
         c->mfc_un.res.minvif = MAXMIFS;
-        mfc6_net_set(c, net);
         return c;
 }
 
-static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
+static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
 {
         struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
         if (c == NULL)
                 return NULL;
         skb_queue_head_init(&c->mfc_un.unres.unresolved);
         c->mfc_un.unres.expires = jiffies + 10 * HZ;
-        mfc6_net_set(c, net);
         return c;
 }
 
@@ -742,7 +740,8 @@ static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
  *      A cache entry has gone into a resolved state from queued
  */
 
-static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
+static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc,
+                                struct mfc6_cache *c)
 {
         struct sk_buff *skb;
 
@@ -755,7 +754,7 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
                         int err;
                         struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
 
-                        if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
+                        if (ip6mr_fill_mroute(net, skb, c, NLMSG_DATA(nlh)) > 0) {
                                 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
                         } else {
                                 nlh->nlmsg_type = NLMSG_ERROR;
@@ -763,9 +762,9 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
                                 skb_trim(skb, nlh->nlmsg_len);
                                 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
                         }
-                        err = rtnl_unicast(skb, mfc6_net(uc), NETLINK_CB(skb).pid);
+                        err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
                 } else
-                        ip6_mr_forward(skb, c);
+                        ip6_mr_forward(net, skb, c);
         }
 }
 
@@ -889,7 +888,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
          */
 
         if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 ||
-            (c = ip6mr_cache_alloc_unres(net)) == NULL) {
+            (c = ip6mr_cache_alloc_unres()) == NULL) {
                 spin_unlock_bh(&mfc_unres_lock);
 
                 kfree_skb(skb);
@@ -1133,7 +1132,7 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
         if (c != NULL) {
                 write_lock_bh(&mrt_lock);
                 c->mf6c_parent = mfc->mf6cc_parent;
-                ip6mr_update_thresholds(c, ttls);
+                ip6mr_update_thresholds(net, c, ttls);
                 if (!mrtsock)
                         c->mfc_flags |= MFC_STATIC;
                 write_unlock_bh(&mrt_lock);
@@ -1143,14 +1142,14 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
         if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
                 return -EINVAL;
 
-        c = ip6mr_cache_alloc(net);
+        c = ip6mr_cache_alloc();
         if (c == NULL)
                 return -ENOMEM;
 
         c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
         c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
         c->mf6c_parent = mfc->mf6cc_parent;
-        ip6mr_update_thresholds(c, ttls);
+        ip6mr_update_thresholds(net, c, ttls);
         if (!mrtsock)
                 c->mfc_flags |= MFC_STATIC;
 
@@ -1178,7 +1177,7 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
         spin_unlock_bh(&mfc_unres_lock);
 
         if (uc) {
-                ip6mr_cache_resolve(uc, c);
+                ip6mr_cache_resolve(net, uc, c);
                 ip6mr_cache_free(uc);
         }
         return 0;
@@ -1229,7 +1228,7 @@ static void mroute_clean_tables(struct net *net)
                 cp = &net->ipv6.mfc6_unres_queue;
                 while ((c = *cp) != NULL) {
                         *cp = c->next;
-                        ip6mr_destroy_unres(c);
+                        ip6mr_destroy_unres(net, c);
                 }
                 spin_unlock_bh(&mfc_unres_lock);
         }
@@ -1497,10 +1496,10 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
  *      Processing handlers for ip6mr_forward
  */
 
-static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
+static int ip6mr_forward2(struct net *net, struct sk_buff *skb,
+                          struct mfc6_cache *c, int vifi)
 {
         struct ipv6hdr *ipv6h;
-        struct net *net = mfc6_net(c);
         struct mif_device *vif = &net->ipv6.vif6_table[vifi];
         struct net_device *dev;
         struct dst_entry *dst;
@@ -1581,11 +1580,11 @@ static int ip6mr_find_vif(struct net_device *dev)
         return ct;
 }
 
-static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
+static int ip6_mr_forward(struct net *net, struct sk_buff *skb,
+                          struct mfc6_cache *cache)
 {
         int psend = -1;
         int vif, ct;
-        struct net *net = mfc6_net(cache);
 
         vif = cache->mf6c_parent;
         cache->mfc_un.res.pkt++;
@@ -1627,13 +1626,13 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
                         if (psend != -1) {
                                 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                 if (skb2)
-                                        ip6mr_forward2(skb2, cache, psend);
+                                        ip6mr_forward2(net, skb2, cache, psend);
                         }
                         psend = ct;
                 }
         }
         if (psend != -1) {
-                ip6mr_forward2(skb, cache, psend);
+                ip6mr_forward2(net, skb, cache, psend);
                 return 0;
         }
 
@@ -1674,7 +1673,7 @@ int ip6_mr_input(struct sk_buff *skb)
                 return -ENODEV;
         }
 
-        ip6_mr_forward(skb, cache);
+        ip6_mr_forward(net, skb, cache);
 
         read_unlock(&mrt_lock);
 
@@ -1683,11 +1682,11 @@ int ip6_mr_input(struct sk_buff *skb)
 
 
 static int
-ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
+ip6mr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc6_cache *c,
+                  struct rtmsg *rtm)
 {
         int ct;
         struct rtnexthop *nhp;
-        struct net *net = mfc6_net(c);
         u8 *b = skb_tail_pointer(skb);
         struct rtattr *mp_head;
 
@@ -1781,7 +1780,7 @@ int ip6mr_get_route(struct net *net,
         if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
                 cache->mfc_flags |= MFC_NOTIFY;
 
-        err = ip6mr_fill_mroute(skb, cache, rtm);
+        err = ip6mr_fill_mroute(net, skb, cache, rtm);
         read_unlock(&mrt_lock);
         return err;
 }