path: root/net/ipv4/ipmr.c
author	Patrick McHardy <kaber@trash.net>	2010-04-13 01:03:20 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-13 17:49:33 -0400
commit	d658f8a0e63b6476148162aa7a3ffffc58dcad52 (patch)
tree	25087c18eb91bbe040cf5c9b5e1710d35e37328f	/net/ipv4/ipmr.c
parent	e258beb22f4d3ea3dc88586ffc9c990d0eb03380 (diff)
ipv4: ipmr: remove net pointer from struct mfc_cache
Now that cache entries in unres_queue don't need to be distinguished by their network namespace pointer anymore, we can remove it from struct mfc_cache and pass the namespace as a function argument to the functions that need it.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
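For context, the helpers this diff deletes calls to (mfc_net(), mfc_net_set(), and the release_net() in ipmr_cache_free()) implemented the per-entry namespace tracking in include/linux/mroute.h. A minimal sketch of that pattern, reconstructed from the call sites below rather than quoted verbatim from the header:

/* Approximate shape of the pre-patch code: each mfc_cache carried its
 * own namespace pointer and held a reference on it for the entry's
 * whole lifetime (hold_net() on set, release_net() on free).
 */
struct mfc_cache {
	struct mfc_cache *next;
#ifdef CONFIG_NET_NS
	struct net *mfc_net;	/* the field this patch removes */
#endif
	/* ... origin, group, parent, flags, mfc_un ... */
};

static inline struct net *mfc_net(struct mfc_cache *mfc)
{
	return read_pnet(&mfc->mfc_net);
}

static inline void mfc_net_set(struct mfc_cache *mfc, struct net *net)
{
	write_pnet(&mfc->mfc_net, hold_net(net));
}

Since every caller in ipmr.c already reaches the multicast tables through net->ipv4, the patch instead threads struct net *net down the call chain, shrinking each cache entry by a pointer and dropping a reference-count round trip per entry.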
Diffstat (limited to 'net/ipv4/ipmr.c')
-rw-r--r--	net/ipv4/ipmr.c	65
1 file changed, 32 insertions, 33 deletions
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index d6aa65e2b08f..f8e25c8ba070 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -93,10 +93,12 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 
 static struct kmem_cache *mrt_cachep __read_mostly;
 
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
+static int ip_mr_forward(struct net *net, struct sk_buff *skb,
+			 struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct net *net,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
-static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
+static int ipmr_fill_mroute(struct net *net, struct sk_buff *skb,
+			    struct mfc_cache *c, struct rtmsg *rtm);
 
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
 
@@ -325,7 +327,6 @@ static int vif_delete(struct net *net, int vifi, int notify,
 
 static inline void ipmr_cache_free(struct mfc_cache *c)
 {
-	release_net(mfc_net(c));
 	kmem_cache_free(mrt_cachep, c);
 }
 
@@ -333,11 +334,10 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
    and reporting error to netlink readers.
  */
 
-static void ipmr_destroy_unres(struct mfc_cache *c)
+static void ipmr_destroy_unres(struct net *net, struct mfc_cache *c)
 {
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
-	struct net *net = mfc_net(c);
 
 	atomic_dec(&net->ipv4.cache_resolve_queue_len);
 
@@ -392,7 +392,7 @@ static void ipmr_expire_process(unsigned long arg)
 
 		*cp = c->next;
 
-		ipmr_destroy_unres(c);
+		ipmr_destroy_unres(net, c);
 	}
 
 	if (net->ipv4.mfc_unres_queue != NULL)
@@ -404,10 +404,10 @@ out:
 
 /* Fill oifs list. It is called under write locked mrt_lock. */
 
-static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
+static void ipmr_update_thresholds(struct net *net, struct mfc_cache *cache,
+				   unsigned char *ttls)
 {
 	int vifi;
-	struct net *net = mfc_net(cache);
 
 	cache->mfc_un.res.minvif = MAXVIFS;
 	cache->mfc_un.res.maxvif = 0;
@@ -547,24 +547,22 @@ static struct mfc_cache *ipmr_cache_find(struct net *net,
 /*
  *	Allocate a multicast cache entry
  */
-static struct mfc_cache *ipmr_cache_alloc(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc(void)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 	if (c == NULL)
 		return NULL;
 	c->mfc_un.res.minvif = MAXVIFS;
-	mfc_net_set(c, net);
 	return c;
 }
 
-static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc_unres(void)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
 	if (c == NULL)
 		return NULL;
 	skb_queue_head_init(&c->mfc_un.unres.unresolved);
 	c->mfc_un.unres.expires = jiffies + 10*HZ;
-	mfc_net_set(c, net);
 	return c;
 }
 
@@ -572,7 +570,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
  *	A cache entry has gone into a resolved state from queued
  */
 
-static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
+static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc,
+			       struct mfc_cache *c)
 {
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
@@ -585,7 +584,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
-			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
+			if (ipmr_fill_mroute(net, skb, c, NLMSG_DATA(nlh)) > 0) {
 				nlh->nlmsg_len = (skb_tail_pointer(skb) -
 						  (u8 *)nlh);
 			} else {
@@ -597,9 +596,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 				memset(&e->msg, 0, sizeof(e->msg));
 			}
 
-			rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid);
+			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
 		} else
-			ip_mr_forward(skb, c, 0);
+			ip_mr_forward(net, skb, c, 0);
 	}
 }
 
@@ -717,7 +716,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 	 */
 
 	if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
-	    (c = ipmr_cache_alloc_unres(net)) == NULL) {
+	    (c = ipmr_cache_alloc_unres()) == NULL) {
 		spin_unlock_bh(&mfc_unres_lock);
 
 		kfree_skb(skb);
@@ -814,7 +813,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	if (c != NULL) {
 		write_lock_bh(&mrt_lock);
 		c->mfc_parent = mfc->mfcc_parent;
-		ipmr_update_thresholds(c, mfc->mfcc_ttls);
+		ipmr_update_thresholds(net, c, mfc->mfcc_ttls);
 		if (!mrtsock)
 			c->mfc_flags |= MFC_STATIC;
 		write_unlock_bh(&mrt_lock);
@@ -824,14 +823,14 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
 		return -EINVAL;
 
-	c = ipmr_cache_alloc(net);
+	c = ipmr_cache_alloc();
 	if (c == NULL)
 		return -ENOMEM;
 
 	c->mfc_origin = mfc->mfcc_origin.s_addr;
 	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
 	c->mfc_parent = mfc->mfcc_parent;
-	ipmr_update_thresholds(c, mfc->mfcc_ttls);
+	ipmr_update_thresholds(net, c, mfc->mfcc_ttls);
 	if (!mrtsock)
 		c->mfc_flags |= MFC_STATIC;
 
@@ -859,7 +858,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	spin_unlock_bh(&mfc_unres_lock);
 
 	if (uc) {
-		ipmr_cache_resolve(uc, c);
+		ipmr_cache_resolve(net, uc, c);
 		ipmr_cache_free(uc);
 	}
 	return 0;
@@ -910,7 +909,7 @@ static void mroute_clean_tables(struct net *net)
 		cp = &net->ipv4.mfc_unres_queue;
 		while ((c = *cp) != NULL) {
 			*cp = c->next;
-			ipmr_destroy_unres(c);
+			ipmr_destroy_unres(net, c);
 		}
 		spin_unlock_bh(&mfc_unres_lock);
 	}
@@ -1221,9 +1220,9 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
  *	Processing handlers for ipmr_forward
  */
 
-static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
+static void ipmr_queue_xmit(struct net *net, struct sk_buff *skb,
+			    struct mfc_cache *c, int vifi)
 {
-	struct net *net = mfc_net(c);
 	const struct iphdr *iph = ip_hdr(skb);
 	struct vif_device *vif = &net->ipv4.vif_table[vifi];
 	struct net_device *dev;
@@ -1335,11 +1334,11 @@ static int ipmr_find_vif(struct net_device *dev)
 
 /* "local" means that we should preserve one skb (for local delivery) */
 
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
+static int ip_mr_forward(struct net *net, struct sk_buff *skb,
+			 struct mfc_cache *cache, int local)
 {
 	int psend = -1;
 	int vif, ct;
-	struct net *net = mfc_net(cache);
 
 	vif = cache->mfc_parent;
 	cache->mfc_un.res.pkt++;
@@ -1396,7 +1395,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 				if (skb2)
-					ipmr_queue_xmit(skb2, cache, psend);
+					ipmr_queue_xmit(net, skb2, cache, psend);
 			}
 			psend = ct;
 		}
@@ -1405,9 +1404,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 		if (local) {
 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 			if (skb2)
-				ipmr_queue_xmit(skb2, cache, psend);
+				ipmr_queue_xmit(net, skb2, cache, psend);
 		} else {
-			ipmr_queue_xmit(skb, cache, psend);
+			ipmr_queue_xmit(net, skb, cache, psend);
 			return 0;
 		}
 	}
@@ -1488,7 +1487,7 @@ int ip_mr_input(struct sk_buff *skb)
 		return -ENODEV;
 	}
 
-	ip_mr_forward(skb, cache, local);
+	ip_mr_forward(net, skb, cache, local);
 
 	read_unlock(&mrt_lock);
 
@@ -1602,11 +1601,11 @@ drop:
 #endif
 
 static int
-ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
+ipmr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc_cache *c,
+		 struct rtmsg *rtm)
 {
 	int ct;
 	struct rtnexthop *nhp;
-	struct net *net = mfc_net(c);
 	u8 *b = skb_tail_pointer(skb);
 	struct rtattr *mp_head;
 
@@ -1686,7 +1685,7 @@ int ipmr_get_route(struct net *net,
 
 	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
 		cache->mfc_flags |= MFC_NOTIFY;
-	err = ipmr_fill_mroute(skb, cache, rtm);
+	err = ipmr_fill_mroute(net, skb, cache, rtm);
 	read_unlock(&mrt_lock);
 	return err;
 }