Diffstat (limited to 'net/ipv4/ipmr.c')
 net/ipv4/ipmr.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 75a5f79cc226..8428a0fb5c10 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -327,6 +327,12 @@ static int vif_delete(int vifi, int notify)
 	return 0;
 }
 
+static inline void ipmr_cache_free(struct mfc_cache *c)
+{
+	release_net(mfc_net(c));
+	kmem_cache_free(mrt_cachep, c);
+}
+
 /* Destroy an unresolved cache entry, killing queued skbs
    and reporting error to netlink readers.
  */
@@ -353,7 +359,7 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
 		kfree_skb(skb);
 	}
 
-	kmem_cache_free(mrt_cachep, c);
+	ipmr_cache_free(c);
 }
 
 
@@ -528,22 +534,24 @@ static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
 /*
  *	Allocate a multicast cache entry
  */
-static struct mfc_cache *ipmr_cache_alloc(void)
+static struct mfc_cache *ipmr_cache_alloc(struct net *net)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 	if (c == NULL)
 		return NULL;
 	c->mfc_un.res.minvif = MAXVIFS;
+	mfc_net_set(c, net);
 	return c;
 }
 
-static struct mfc_cache *ipmr_cache_alloc_unres(void)
+static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
 	if (c == NULL)
 		return NULL;
 	skb_queue_head_init(&c->mfc_un.unres.unresolved);
 	c->mfc_un.unres.expires = jiffies + 10*HZ;
+	mfc_net_set(c, net);
 	return c;
 }
 
@@ -695,7 +703,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 	 */
 
 	if (atomic_read(&cache_resolve_queue_len) >= 10 ||
-	    (c=ipmr_cache_alloc_unres())==NULL) {
+	    (c = ipmr_cache_alloc_unres(&init_net)) == NULL) {
 		spin_unlock_bh(&mfc_unres_lock);
 
 		kfree_skb(skb);
@@ -718,7 +726,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
718 */ 726 */
719 spin_unlock_bh(&mfc_unres_lock); 727 spin_unlock_bh(&mfc_unres_lock);
720 728
721 kmem_cache_free(mrt_cachep, c); 729 ipmr_cache_free(c);
722 kfree_skb(skb); 730 kfree_skb(skb);
723 return err; 731 return err;
724 } 732 }
@@ -763,7 +771,7 @@ static int ipmr_mfc_delete(struct mfcctl *mfc)
 			*cp = c->next;
 			write_unlock_bh(&mrt_lock);
 
-			kmem_cache_free(mrt_cachep, c);
+			ipmr_cache_free(c);
 			return 0;
 		}
 	}
@@ -796,7 +804,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
 	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
 		return -EINVAL;
 
-	c = ipmr_cache_alloc();
+	c = ipmr_cache_alloc(&init_net);
 	if (c == NULL)
 		return -ENOMEM;
 
@@ -831,7 +839,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
 
 	if (uc) {
 		ipmr_cache_resolve(uc, c);
-		kmem_cache_free(mrt_cachep, uc);
+		ipmr_cache_free(uc);
 	}
 	return 0;
 }
@@ -868,7 +876,7 @@ static void mroute_clean_tables(struct sock *sk)
 			*cp = c->next;
 			write_unlock_bh(&mrt_lock);
 
-			kmem_cache_free(mrt_cachep, c);
+			ipmr_cache_free(c);
 		}
 	}
 
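
The hunks above attach a namespace to each mfc_cache entry at allocation time (mfc_net_set() in ipmr_cache_alloc()/ipmr_cache_alloc_unres()) and route every free path through the new ipmr_cache_free() helper, which drops that reference via release_net() before returning the object to the slab cache. The sketch below is a minimal, self-contained userspace illustration of that ownership pattern only; the struct net, hold_net()/release_net() and mfc_net()/mfc_net_set() definitions here are simplified stand-ins for the kernel ones, not the real API.

/*
 * Illustrative sketch (userspace, not kernel code): take a reference on
 * the owning "namespace" when an entry is allocated, and release it in a
 * single free helper so every caller stays balanced.
 */
#include <stdio.h>
#include <stdlib.h>

struct net {                    /* stand-in for the kernel's struct net */
	int refcnt;
};

struct mfc_cache {              /* stand-in for the kernel's mfc_cache */
	struct net *mfc_net;    /* namespace reference held per entry */
};

static struct net *hold_net(struct net *net)
{
	net->refcnt++;
	return net;
}

static void release_net(struct net *net)
{
	net->refcnt--;
}

static void mfc_net_set(struct mfc_cache *c, struct net *net)
{
	c->mfc_net = hold_net(net);     /* reference taken at allocation */
}

static struct net *mfc_net(struct mfc_cache *c)
{
	return c->mfc_net;
}

/* The one place the reference is dropped, mirroring ipmr_cache_free(). */
static void ipmr_cache_free(struct mfc_cache *c)
{
	release_net(mfc_net(c));
	free(c);                        /* kmem_cache_free() in the kernel */
}

static struct mfc_cache *ipmr_cache_alloc(struct net *net)
{
	struct mfc_cache *c = calloc(1, sizeof(*c));

	if (c == NULL)
		return NULL;
	mfc_net_set(c, net);            /* mirrors the added hunk */
	return c;
}

int main(void)
{
	struct net init_net = { .refcnt = 1 };
	struct mfc_cache *c = ipmr_cache_alloc(&init_net);

	if (c == NULL)
		return 1;
	ipmr_cache_free(c);
	printf("refcnt back to %d\n", init_net.refcnt);   /* prints 1 */
	return 0;
}

Because the reference is acquired in exactly one place and released in exactly one place, it no longer matters which caller frees the entry; this is why all of the kmem_cache_free(mrt_cachep, ...) call sites in the patch are converted to ipmr_cache_free().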