author		David S. Miller <davem@davemloft.net>	2010-04-15 17:14:05 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-15 17:14:05 -0400
commit		791f58c0640f906d3f63518d3f02630dbbafb7a2
tree		b2dbe627ee54b6676946075ff0ef7aa9401f8950 /net
parent		fd793d8905720595caede6bd26c5df6c0ecd37f8
parent		8de53dfbf9a0a0f7538c005137059c5c021476e1
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/ipmr-2.6
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/Kconfig	 2
-rw-r--r--	net/ipv4/ipmr.c		10
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index be597749c385..8e3a1fd938ab 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -252,7 +252,7 @@ config IP_MROUTE
 
 config IP_MROUTE_MULTIPLE_TABLES
 	bool "IP: multicast policy routing"
-	depends on IP_ADVANCED_ROUTER
+	depends on IP_MROUTE && IP_ADVANCED_ROUTER
 	select FIB_RULES
 	help
 	  Normally, a multicast router runs a userspace daemon and decides
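
The added IP_MROUTE dependency matters because the multiple-tables support lives inside net/ipv4/ipmr.c, which is only built when IP_MROUTE is enabled; without it the option could be set (and FIB_RULES selected) while none of the code it controls is compiled. Roughly, the option decides whether a namespace carries one multicast routing table or several selected by fib rules. A simplified C illustration of that pattern follows (illustrative only, not the exact upstream code):

/* Simplified illustration of what CONFIG_IP_MROUTE_MULTIPLE_TABLES
 * switches in net/ipv4/ipmr.c -- not the exact upstream code.
 */
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	/* several tables per namespace, looked up by id */
	list_for_each_entry(mrt, &net->ipv4.mr_tables, list)
		if (mrt->id == id)
			return mrt;
	return NULL;
}
#else
static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	/* single table per namespace; the id is ignored */
	return net->ipv4.mrt;
}
#endif
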
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5df5fd74c6d1..7d8a2bcecb76 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -71,6 +71,9 @@
 
 struct mr_table {
 	struct list_head	list;
+#ifdef CONFIG_NET_NS
+	struct net		*net;
+#endif
 	u32			id;
 	struct sock		*mroute_sk;
 	struct timer_list	ipmr_expire_timer;
@@ -308,6 +311,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
 	if (mrt == NULL)
 		return NULL;
+	write_pnet(&mrt->net, net);
 	mrt->id = id;
 
 	/* Forwarding cache */
@@ -580,7 +584,7 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
 
 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 {
-	struct net *net = NULL; //mrt->net;
+	struct net *net = read_pnet(&mrt->net);
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
 
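
The net back-pointer added to struct mr_table above is guarded by CONFIG_NET_NS, so the field only exists when network namespaces are compiled in, and it is always touched through write_pnet()/read_pnet(). Those helpers behave roughly as sketched below (simplified from include/net/net_namespace.h of that era, shown only for context); this is why ipmr_destroy_unres() can now derive the owning namespace from the table instead of the previous NULL placeholder:

#ifdef CONFIG_NET_NS
static inline void write_pnet(struct net **pnet, struct net *net)
{
	*pnet = net;
}

static inline struct net *read_pnet(struct net * const *pnet)
{
	return *pnet;
}
#else
/* without namespaces there is only init_net, so nothing is stored */
#define write_pnet(pnet, net)	do { (void)(net); } while (0)
#define read_pnet(pnet)		(&init_net)
#endif
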
@@ -1089,12 +1093,14 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 	 * Check to see if we resolved a queued list. If so we
 	 * need to send on the frames and tidy up.
 	 */
+	found = false;
 	spin_lock_bh(&mfc_unres_lock);
 	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
 		if (uc->mfc_origin == c->mfc_origin &&
 		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
 			list_del(&uc->list);
 			atomic_dec(&mrt->cache_resolve_queue_len);
+			found = true;
 			break;
 		}
 	}
@@ -1102,7 +1108,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 		del_timer(&mrt->ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
-	if (uc) {
+	if (found) {
 		ipmr_cache_resolve(net, mrt, uc, c);
 		ipmr_cache_free(uc);
 	}
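
The new found flag fixes a classic list_for_each_entry() pitfall: when the loop runs to completion without break, the cursor (uc here) is not NULL; it points at the address computed from the list head itself, so the old if (uc) test was effectively always true and could pass a bogus entry to ipmr_cache_resolve(). Setting a boolean inside the loop is the usual idiom; a minimal sketch of the pattern (hypothetical names, not taken from the patch):

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head list;
	int key;
};

/* Remove the first item with the given key; return whether one was found.
 * Testing the cursor pointer after the loop would be wrong, because
 * list_for_each_entry() never leaves it NULL.
 */
static bool remove_by_key(struct list_head *head, int key)
{
	struct item *it;
	bool found = false;

	list_for_each_entry(it, head, list) {
		if (it->key == key) {
			list_del(&it->list);
			found = true;
			break;
		}
	}
	return found;
}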