author	Benjamin Thery <benjamin.thery@bull.net>	2009-01-21 23:56:23 -0500
committer	David S. Miller <davem@davemloft.net>	2009-01-22 16:57:41 -0500
commit	4feb88e5c694bfe414cbc3ce0e383f7f7038f90b (patch)
tree	418140ffc541223205b921b9995b981a5cfbc2dd /net
parent	f6bb451476be53d456e73bcfd82356afd680bbb0 (diff)
netns: ipmr: enable namespace support in ipv4 multicast routing code
This last patch makes the appropriate changes to use and propagate the
network namespace where needed in IPv4 multicast routing code.

This consists mainly in replacing all the remaining init_net occurrences
with the current netns pointer retrieved from sockets, net devices or
mfc_caches, depending on the routine's context.

Some routines receive a new 'struct net' parameter to propagate the current
netns:
* vif_add/vif_delete
* ipmr_new_tunnel
* mroute_clean_tables
* ipmr_cache_find
* ipmr_cache_report
* ipmr_cache_unresolved
* ipmr_mfc_add/ipmr_mfc_delete
* ipmr_get_route
* rt_fill_info (in route.c)

Signed-off-by: Benjamin Thery <benjamin.thery@bull.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
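As a minimal sketch of the conversion pattern described above (not part of
the patch itself; example_pattern() is a hypothetical helper), the netns
pointer is derived from whatever object a routine already has at hand, and
the former init_net.ipv4.* accesses become lookups on that netns:

	/* Hypothetical helper illustrating the three netns retrieval patterns. */
	static void example_pattern(struct sock *sk, struct net_device *dev,
				    struct mfc_cache *c)
	{
		struct net *net;

		net = sock_net(sk);	/* from a socket: setsockopt/ioctl paths */
		net = dev_net(dev);	/* from a net device: notifiers, rx paths */
		net = mfc_net(c);	/* from a cache entry: forwarding paths */

		/* Per-namespace state replaces the old init_net.ipv4.* globals. */
		if (net->ipv4.mroute_sk == NULL)
			return;
	}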
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/ipmr.c		| 243
-rw-r--r--	net/ipv4/route.c	|  11
2 files changed, 141 insertions(+), 113 deletions(-)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a4fd97f1920c..21a6dc710f20 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -95,7 +95,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;
 
 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
-static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
+static int ipmr_cache_report(struct net *net,
+			     struct sk_buff *pkt, vifi_t vifi, int assert);
 static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
 
 #ifdef CONFIG_IP_PIMSM_V2
@@ -108,9 +109,11 @@ static struct timer_list ipmr_expire_timer;
 
 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
 {
+	struct net *net = dev_net(dev);
+
 	dev_close(dev);
 
-	dev = __dev_get_by_name(&init_net, "tunl0");
+	dev = __dev_get_by_name(net, "tunl0");
 	if (dev) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		struct ifreq ifr;
@@ -136,11 +139,11 @@ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
 }
 
 static
-struct net_device *ipmr_new_tunnel(struct vifctl *v)
+struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
 {
 	struct net_device *dev;
 
-	dev = __dev_get_by_name(&init_net, "tunl0");
+	dev = __dev_get_by_name(net, "tunl0");
 
 	if (dev) {
 		const struct net_device_ops *ops = dev->netdev_ops;
@@ -169,7 +172,8 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v)
 
 	dev = NULL;
 
-	if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
+	if (err == 0 &&
+	    (dev = __dev_get_by_name(net, p.name)) != NULL) {
 		dev->flags |= IFF_MULTICAST;
 
 		in_dev = __in_dev_get_rtnl(dev);
@@ -199,10 +203,13 @@ failure:
 
 static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	struct net *net = dev_net(dev);
+
 	read_lock(&mrt_lock);
 	dev->stats.tx_bytes += skb->len;
 	dev->stats.tx_packets++;
-	ipmr_cache_report(skb, init_net.ipv4.mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
+	ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num,
+			  IGMPMSG_WHOLEPKT);
 	read_unlock(&mrt_lock);
 	kfree_skb(skb);
 	return 0;
@@ -269,16 +276,16 @@ failure:
  * @notify: Set to 1, if the caller is a notifier_call
  */
 
-static int vif_delete(int vifi, int notify)
+static int vif_delete(struct net *net, int vifi, int notify)
 {
 	struct vif_device *v;
 	struct net_device *dev;
 	struct in_device *in_dev;
 
-	if (vifi < 0 || vifi >= init_net.ipv4.maxvif)
+	if (vifi < 0 || vifi >= net->ipv4.maxvif)
 		return -EADDRNOTAVAIL;
 
-	v = &init_net.ipv4.vif_table[vifi];
+	v = &net->ipv4.vif_table[vifi];
 
 	write_lock_bh(&mrt_lock);
 	dev = v->dev;
@@ -290,17 +297,17 @@ static int vif_delete(int vifi, int notify)
 	}
 
 #ifdef CONFIG_IP_PIMSM
-	if (vifi == init_net.ipv4.mroute_reg_vif_num)
-		init_net.ipv4.mroute_reg_vif_num = -1;
+	if (vifi == net->ipv4.mroute_reg_vif_num)
+		net->ipv4.mroute_reg_vif_num = -1;
 #endif
 
-	if (vifi+1 == init_net.ipv4.maxvif) {
+	if (vifi+1 == net->ipv4.maxvif) {
 		int tmp;
 		for (tmp=vifi-1; tmp>=0; tmp--) {
-			if (VIF_EXISTS(&init_net, tmp))
+			if (VIF_EXISTS(net, tmp))
 				break;
 		}
-		init_net.ipv4.maxvif = tmp+1;
+		net->ipv4.maxvif = tmp+1;
 	}
 
 	write_unlock_bh(&mrt_lock);
@@ -333,8 +340,9 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
 {
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
+	struct net *net = mfc_net(c);
 
-	atomic_dec(&init_net.ipv4.cache_resolve_queue_len);
+	atomic_dec(&net->ipv4.cache_resolve_queue_len);
 
 	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
 		if (ip_hdr(skb)->version == 0) {
@@ -346,7 +354,7 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
 			e->error = -ETIMEDOUT;
 			memset(&e->msg, 0, sizeof(e->msg));
 
-			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
+			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
 		} else
 			kfree_skb(skb);
 	}
@@ -401,13 +409,14 @@ out:
 static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
 {
 	int vifi;
+	struct net *net = mfc_net(cache);
 
 	cache->mfc_un.res.minvif = MAXVIFS;
 	cache->mfc_un.res.maxvif = 0;
 	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
 
-	for (vifi = 0; vifi < init_net.ipv4.maxvif; vifi++) {
-		if (VIF_EXISTS(&init_net, vifi) &&
+	for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) {
+		if (VIF_EXISTS(net, vifi) &&
 		    ttls[vifi] && ttls[vifi] < 255) {
 			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
 			if (cache->mfc_un.res.minvif > vifi)
@@ -418,16 +427,16 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
 	}
 }
 
-static int vif_add(struct vifctl *vifc, int mrtsock)
+static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
 {
 	int vifi = vifc->vifc_vifi;
-	struct vif_device *v = &init_net.ipv4.vif_table[vifi];
+	struct vif_device *v = &net->ipv4.vif_table[vifi];
 	struct net_device *dev;
 	struct in_device *in_dev;
 	int err;
 
 	/* Is vif busy ? */
-	if (VIF_EXISTS(&init_net, vifi))
+	if (VIF_EXISTS(net, vifi))
 		return -EADDRINUSE;
 
 	switch (vifc->vifc_flags) {
@@ -437,7 +446,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
 		 * Special Purpose VIF in PIM
 		 * All the packets will be sent to the daemon
 		 */
-		if (init_net.ipv4.mroute_reg_vif_num >= 0)
+		if (net->ipv4.mroute_reg_vif_num >= 0)
 			return -EADDRINUSE;
 		dev = ipmr_reg_vif();
 		if (!dev)
@@ -451,7 +460,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
 		break;
 #endif
 	case VIFF_TUNNEL:
-		dev = ipmr_new_tunnel(vifc);
+		dev = ipmr_new_tunnel(net, vifc);
 		if (!dev)
 			return -ENOBUFS;
 		err = dev_set_allmulti(dev, 1);
@@ -462,7 +471,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
 		}
 		break;
 	case 0:
-		dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);
+		dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
 		if (!dev)
 			return -EADDRNOTAVAIL;
 		err = dev_set_allmulti(dev, 1);
@@ -503,20 +512,22 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
 	v->dev = dev;
 #ifdef CONFIG_IP_PIMSM
 	if (v->flags&VIFF_REGISTER)
-		init_net.ipv4.mroute_reg_vif_num = vifi;
+		net->ipv4.mroute_reg_vif_num = vifi;
 #endif
-	if (vifi+1 > init_net.ipv4.maxvif)
-		init_net.ipv4.maxvif = vifi+1;
+	if (vifi+1 > net->ipv4.maxvif)
+		net->ipv4.maxvif = vifi+1;
 	write_unlock_bh(&mrt_lock);
 	return 0;
 }
 
-static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
+static struct mfc_cache *ipmr_cache_find(struct net *net,
+					 __be32 origin,
+					 __be32 mcastgrp)
 {
 	int line = MFC_HASH(mcastgrp, origin);
 	struct mfc_cache *c;
 
-	for (c = init_net.ipv4.mfc_cache_array[line]; c; c = c->next) {
+	for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) {
 		if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
 			break;
 	}
@@ -576,7 +587,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 				memset(&e->msg, 0, sizeof(e->msg));
 			}
 
-			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
+			rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid);
 		} else
 			ip_mr_forward(skb, c, 0);
 	}
@@ -589,7 +600,8 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
  *	Called under mrt_lock.
  */
 
-static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
+static int ipmr_cache_report(struct net *net,
+			     struct sk_buff *pkt, vifi_t vifi, int assert)
 {
 	struct sk_buff *skb;
 	const int ihl = ip_hdrlen(pkt);
@@ -621,7 +633,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
 		msg->im_msgtype = IGMPMSG_WHOLEPKT;
 		msg->im_mbz = 0;
-		msg->im_vif = init_net.ipv4.mroute_reg_vif_num;
+		msg->im_vif = net->ipv4.mroute_reg_vif_num;
 		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
 		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
 					     sizeof(struct iphdr));
@@ -653,7 +665,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 		skb->transport_header = skb->network_header;
 	}
 
-	if (init_net.ipv4.mroute_sk == NULL) {
+	if (net->ipv4.mroute_sk == NULL) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -661,7 +673,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 	/*
 	 * Deliver to mrouted
 	 */
-	ret = sock_queue_rcv_skb(init_net.ipv4.mroute_sk, skb);
+	ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb);
 	if (ret < 0) {
 		if (net_ratelimit())
 			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
@@ -676,7 +688,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
  */
 
 static int
-ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
+ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 {
 	int err;
 	struct mfc_cache *c;
@@ -684,7 +696,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 
 	spin_lock_bh(&mfc_unres_lock);
 	for (c=mfc_unres_queue; c; c=c->next) {
-		if (net_eq(mfc_net(c), &init_net) &&
+		if (net_eq(mfc_net(c), net) &&
 		    c->mfc_mcastgrp == iph->daddr &&
 		    c->mfc_origin == iph->saddr)
 			break;
@@ -695,8 +707,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 	 * Create a new entry if allowable
 	 */
 
-	if (atomic_read(&init_net.ipv4.cache_resolve_queue_len) >= 10 ||
-	    (c = ipmr_cache_alloc_unres(&init_net)) == NULL) {
+	if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
+	    (c = ipmr_cache_alloc_unres(net)) == NULL) {
 		spin_unlock_bh(&mfc_unres_lock);
 
 		kfree_skb(skb);
@@ -713,7 +725,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 		/*
 		 *	Reflect first query at mrouted.
 		 */
-		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
+		err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE);
+		if (err < 0) {
 			/* If the report failed throw the cache entry
 			   out - Brad Parker
 			 */
@@ -724,7 +737,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 			return err;
 		}
 
-		atomic_inc(&init_net.ipv4.cache_resolve_queue_len);
+		atomic_inc(&net->ipv4.cache_resolve_queue_len);
 		c->next = mfc_unres_queue;
 		mfc_unres_queue = c;
 
@@ -750,14 +763,14 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
  *	MFC cache manipulation by user space mroute daemon
  */
 
-static int ipmr_mfc_delete(struct mfcctl *mfc)
+static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
 {
 	int line;
 	struct mfc_cache *c, **cp;
 
 	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
-	for (cp = &init_net.ipv4.mfc_cache_array[line];
+	for (cp = &net->ipv4.mfc_cache_array[line];
 	     (c = *cp) != NULL; cp = &c->next) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
 		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
@@ -772,14 +785,14 @@ static int ipmr_mfc_delete(struct mfcctl *mfc)
 	return -ENOENT;
 }
 
-static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
+static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 {
 	int line;
 	struct mfc_cache *uc, *c, **cp;
 
 	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
-	for (cp = &init_net.ipv4.mfc_cache_array[line];
+	for (cp = &net->ipv4.mfc_cache_array[line];
 	     (c = *cp) != NULL; cp = &c->next) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
 		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
@@ -799,7 +812,7 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
 	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
 		return -EINVAL;
 
-	c = ipmr_cache_alloc(&init_net);
+	c = ipmr_cache_alloc(net);
 	if (c == NULL)
 		return -ENOMEM;
 
@@ -811,8 +824,8 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
 		c->mfc_flags |= MFC_STATIC;
 
 	write_lock_bh(&mrt_lock);
-	c->next = init_net.ipv4.mfc_cache_array[line];
-	init_net.ipv4.mfc_cache_array[line] = c;
+	c->next = net->ipv4.mfc_cache_array[line];
+	net->ipv4.mfc_cache_array[line] = c;
 	write_unlock_bh(&mrt_lock);
 
 	/*
@@ -822,11 +835,11 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
 	spin_lock_bh(&mfc_unres_lock);
 	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
 	     cp = &uc->next) {
-		if (net_eq(mfc_net(uc), &init_net) &&
+		if (net_eq(mfc_net(uc), net) &&
 		    uc->mfc_origin == c->mfc_origin &&
 		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
 			*cp = uc->next;
-			atomic_dec(&init_net.ipv4.cache_resolve_queue_len);
+			atomic_dec(&net->ipv4.cache_resolve_queue_len);
 			break;
 		}
 	}
@@ -845,16 +858,16 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
  *	Close the multicast socket, and clear the vif tables etc
  */
 
-static void mroute_clean_tables(struct sock *sk)
+static void mroute_clean_tables(struct net *net)
 {
 	int i;
 
 	/*
 	 *	Shut down all active vif entries
 	 */
-	for (i = 0; i < init_net.ipv4.maxvif; i++) {
-		if (!(init_net.ipv4.vif_table[i].flags&VIFF_STATIC))
-			vif_delete(i, 0);
+	for (i = 0; i < net->ipv4.maxvif; i++) {
+		if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC))
+			vif_delete(net, i, 0);
 	}
 
860 /* 873 /*
@@ -863,7 +876,7 @@ static void mroute_clean_tables(struct sock *sk)
863 for (i=0; i<MFC_LINES; i++) { 876 for (i=0; i<MFC_LINES; i++) {
864 struct mfc_cache *c, **cp; 877 struct mfc_cache *c, **cp;
865 878
866 cp = &init_net.ipv4.mfc_cache_array[i]; 879 cp = &net->ipv4.mfc_cache_array[i];
867 while ((c = *cp) != NULL) { 880 while ((c = *cp) != NULL) {
868 if (c->mfc_flags&MFC_STATIC) { 881 if (c->mfc_flags&MFC_STATIC) {
869 cp = &c->next; 882 cp = &c->next;
@@ -877,13 +890,13 @@ static void mroute_clean_tables(struct sock *sk)
 		}
 	}
 
-	if (atomic_read(&init_net.ipv4.cache_resolve_queue_len) != 0) {
+	if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) {
 		struct mfc_cache *c, **cp;
 
 		spin_lock_bh(&mfc_unres_lock);
 		cp = &mfc_unres_queue;
 		while ((c = *cp) != NULL) {
-			if (!net_eq(mfc_net(c), &init_net)) {
+			if (!net_eq(mfc_net(c), net)) {
 				cp = &c->next;
 				continue;
 			}
@@ -897,15 +910,17 @@ static void mroute_clean_tables(struct sock *sk)
 
 static void mrtsock_destruct(struct sock *sk)
 {
+	struct net *net = sock_net(sk);
+
 	rtnl_lock();
-	if (sk == init_net.ipv4.mroute_sk) {
-		IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;
+	if (sk == net->ipv4.mroute_sk) {
+		IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
 
 		write_lock_bh(&mrt_lock);
-		init_net.ipv4.mroute_sk = NULL;
+		net->ipv4.mroute_sk = NULL;
 		write_unlock_bh(&mrt_lock);
 
-		mroute_clean_tables(sk);
+		mroute_clean_tables(net);
 	}
 	rtnl_unlock();
 }
@@ -922,9 +937,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
 	int ret;
 	struct vifctl vif;
 	struct mfcctl mfc;
+	struct net *net = sock_net(sk);
 
 	if (optname != MRT_INIT) {
-		if (sk != init_net.ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
+		if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
 			return -EACCES;
 	}
 
@@ -937,7 +953,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
 			return -ENOPROTOOPT;
 
 		rtnl_lock();
-		if (init_net.ipv4.mroute_sk) {
+		if (net->ipv4.mroute_sk) {
 			rtnl_unlock();
 			return -EADDRINUSE;
 		}
@@ -945,15 +961,15 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
 		ret = ip_ra_control(sk, 1, mrtsock_destruct);
 		if (ret == 0) {
 			write_lock_bh(&mrt_lock);
-			init_net.ipv4.mroute_sk = sk;
+			net->ipv4.mroute_sk = sk;
 			write_unlock_bh(&mrt_lock);
 
-			IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
+			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
 		}
 		rtnl_unlock();
 		return ret;
 	case MRT_DONE:
-		if (sk != init_net.ipv4.mroute_sk)
+		if (sk != net->ipv4.mroute_sk)
 			return -EACCES;
 		return ip_ra_control(sk, 0, NULL);
 	case MRT_ADD_VIF:
@@ -966,9 +982,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
 			return -ENFILE;
 		rtnl_lock();
 		if (optname == MRT_ADD_VIF) {
-			ret = vif_add(&vif, sk == init_net.ipv4.mroute_sk);
+			ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk);
 		} else {
-			ret = vif_delete(vif.vifc_vifi, 0);
+			ret = vif_delete(net, vif.vifc_vifi, 0);
 		}
 		rtnl_unlock();
 		return ret;
@@ -985,9 +1001,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
 			return -EFAULT;
 		rtnl_lock();
 		if (optname == MRT_DEL_MFC)
-			ret = ipmr_mfc_delete(&mfc);
+			ret = ipmr_mfc_delete(net, &mfc);
 		else
-			ret = ipmr_mfc_add(&mfc, sk == init_net.ipv4.mroute_sk);
+			ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk);
 		rtnl_unlock();
 		return ret;
 		/*
@@ -998,7 +1014,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
 		int v;
 		if (get_user(v,(int __user *)optval))
 			return -EFAULT;
-		init_net.ipv4.mroute_do_assert = (v) ? 1 : 0;
+		net->ipv4.mroute_do_assert = (v) ? 1 : 0;
 		return 0;
 	}
 #ifdef CONFIG_IP_PIMSM
@@ -1012,11 +1028,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
 
 		rtnl_lock();
 		ret = 0;
-		if (v != init_net.ipv4.mroute_do_pim) {
-			init_net.ipv4.mroute_do_pim = v;
-			init_net.ipv4.mroute_do_assert = v;
+		if (v != net->ipv4.mroute_do_pim) {
+			net->ipv4.mroute_do_pim = v;
+			net->ipv4.mroute_do_assert = v;
 #ifdef CONFIG_IP_PIMSM_V2
-			if (init_net.ipv4.mroute_do_pim)
+			if (net->ipv4.mroute_do_pim)
 				ret = inet_add_protocol(&pim_protocol,
 							IPPROTO_PIM);
 			else
@@ -1047,6 +1063,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
 {
 	int olr;
 	int val;
+	struct net *net = sock_net(sk);
 
 	if (optname != MRT_VERSION &&
 #ifdef CONFIG_IP_PIMSM
@@ -1068,10 +1085,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
 		val = 0x0305;
 #ifdef CONFIG_IP_PIMSM
 	else if (optname == MRT_PIM)
-		val = init_net.ipv4.mroute_do_pim;
+		val = net->ipv4.mroute_do_pim;
 #endif
 	else
-		val = init_net.ipv4.mroute_do_assert;
+		val = net->ipv4.mroute_do_assert;
 	if (copy_to_user(optval, &val, olr))
 		return -EFAULT;
 	return 0;
@@ -1087,16 +1104,17 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 	struct sioc_vif_req vr;
 	struct vif_device *vif;
 	struct mfc_cache *c;
+	struct net *net = sock_net(sk);
 
 	switch (cmd) {
 	case SIOCGETVIFCNT:
 		if (copy_from_user(&vr, arg, sizeof(vr)))
 			return -EFAULT;
-		if (vr.vifi >= init_net.ipv4.maxvif)
+		if (vr.vifi >= net->ipv4.maxvif)
 			return -EINVAL;
 		read_lock(&mrt_lock);
-		vif = &init_net.ipv4.vif_table[vr.vifi];
-		if (VIF_EXISTS(&init_net, vr.vifi)) {
+		vif = &net->ipv4.vif_table[vr.vifi];
+		if (VIF_EXISTS(net, vr.vifi)) {
 			vr.icount = vif->pkt_in;
 			vr.ocount = vif->pkt_out;
 			vr.ibytes = vif->bytes_in;
@@ -1114,7 +1132,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 			return -EFAULT;
 
 		read_lock(&mrt_lock);
-		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
+		c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr);
 		if (c) {
 			sr.pktcnt = c->mfc_un.res.pkt;
 			sr.bytecnt = c->mfc_un.res.bytes;
@@ -1136,18 +1154,19 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
 	struct net_device *dev = ptr;
+	struct net *net = dev_net(dev);
 	struct vif_device *v;
 	int ct;
 
-	if (!net_eq(dev_net(dev), &init_net))
+	if (!net_eq(dev_net(dev), net))
 		return NOTIFY_DONE;
 
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
-	v = &init_net.ipv4.vif_table[0];
-	for (ct = 0; ct < init_net.ipv4.maxvif; ct++, v++) {
+	v = &net->ipv4.vif_table[0];
+	for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) {
 		if (v->dev == dev)
-			vif_delete(ct, 1);
+			vif_delete(net, ct, 1);
 	}
 	return NOTIFY_DONE;
 }
@@ -1207,8 +1226,9 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
 
 static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 {
+	struct net *net = mfc_net(c);
 	const struct iphdr *iph = ip_hdr(skb);
-	struct vif_device *vif = &init_net.ipv4.vif_table[vifi];
+	struct vif_device *vif = &net->ipv4.vif_table[vifi];
 	struct net_device *dev;
 	struct rtable *rt;
 	int encap = 0;
@@ -1222,7 +1242,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 		vif->bytes_out += skb->len;
 		vif->dev->stats.tx_bytes += skb->len;
 		vif->dev->stats.tx_packets++;
-		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
+		ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT);
 		kfree_skb(skb);
 		return;
 	}
@@ -1235,7 +1255,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 						.saddr = vif->local,
 						.tos = RT_TOS(iph->tos) } },
 				    .proto = IPPROTO_IPIP };
-		if (ip_route_output_key(&init_net, &rt, &fl))
+		if (ip_route_output_key(net, &rt, &fl))
 			goto out_free;
 		encap = sizeof(struct iphdr);
 	} else {
@@ -1244,7 +1264,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 					      { .daddr = iph->daddr,
 						.tos = RT_TOS(iph->tos) } },
 				    .proto = IPPROTO_IPIP };
-		if (ip_route_output_key(&init_net, &rt, &fl))
+		if (ip_route_output_key(net, &rt, &fl))
 			goto out_free;
 	}
 
@@ -1308,9 +1328,10 @@ out_free:
 
 static int ipmr_find_vif(struct net_device *dev)
 {
+	struct net *net = dev_net(dev);
 	int ct;
-	for (ct = init_net.ipv4.maxvif-1; ct >= 0; ct--) {
-		if (init_net.ipv4.vif_table[ct].dev == dev)
+	for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) {
+		if (net->ipv4.vif_table[ct].dev == dev)
 			break;
 	}
 	return ct;
@@ -1322,6 +1343,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 {
 	int psend = -1;
 	int vif, ct;
+	struct net *net = mfc_net(cache);
 
 	vif = cache->mfc_parent;
 	cache->mfc_un.res.pkt++;
@@ -1330,7 +1352,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 	/*
 	 * Wrong interface: drop packet and (maybe) send PIM assert.
 	 */
-	if (init_net.ipv4.vif_table[vif].dev != skb->dev) {
+	if (net->ipv4.vif_table[vif].dev != skb->dev) {
 		int true_vifi;
 
 		if (skb->rtable->fl.iif == 0) {
@@ -1351,24 +1373,24 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 		cache->mfc_un.res.wrong_if++;
 		true_vifi = ipmr_find_vif(skb->dev);
 
-		if (true_vifi >= 0 && init_net.ipv4.mroute_do_assert &&
+		if (true_vifi >= 0 && net->ipv4.mroute_do_assert &&
 		    /* pimsm uses asserts, when switching from RPT to SPT,
 		       so that we cannot check that packet arrived on an oif.
 		       It is bad, but otherwise we would need to move pretty
 		       large chunk of pimd to kernel. Ough... --ANK
 		     */
-		    (init_net.ipv4.mroute_do_pim ||
+		    (net->ipv4.mroute_do_pim ||
 		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
 		    time_after(jiffies,
 			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
 			cache->mfc_un.res.last_assert = jiffies;
-			ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
+			ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF);
 		}
 		goto dont_forward;
 	}
 
-	init_net.ipv4.vif_table[vif].pkt_in++;
-	init_net.ipv4.vif_table[vif].bytes_in += skb->len;
+	net->ipv4.vif_table[vif].pkt_in++;
+	net->ipv4.vif_table[vif].bytes_in += skb->len;
 
 	/*
 	 * Forward the frame
@@ -1408,6 +1430,7 @@ dont_forward:
 int ip_mr_input(struct sk_buff *skb)
 {
 	struct mfc_cache *cache;
+	struct net *net = dev_net(skb->dev);
 	int local = skb->rtable->rt_flags&RTCF_LOCAL;
 
 	/* Packet is looped back after forward, it should not be
@@ -1428,9 +1451,9 @@ int ip_mr_input(struct sk_buff *skb)
 		   that we can forward NO IGMP messages.
 		 */
 		read_lock(&mrt_lock);
-		if (init_net.ipv4.mroute_sk) {
+		if (net->ipv4.mroute_sk) {
 			nf_reset(skb);
-			raw_rcv(init_net.ipv4.mroute_sk, skb);
+			raw_rcv(net->ipv4.mroute_sk, skb);
 			read_unlock(&mrt_lock);
 			return 0;
 		}
@@ -1439,7 +1462,7 @@ int ip_mr_input(struct sk_buff *skb)
 	}
 
 	read_lock(&mrt_lock);
-	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+	cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 
 	/*
 	 *	No usable cache entry
@@ -1459,7 +1482,7 @@ int ip_mr_input(struct sk_buff *skb)
 
 		vif = ipmr_find_vif(skb->dev);
 		if (vif >= 0) {
-			int err = ipmr_cache_unresolved(vif, skb);
+			int err = ipmr_cache_unresolved(net, vif, skb);
 			read_unlock(&mrt_lock);
 
 			return err;
@@ -1490,6 +1513,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
 {
 	struct net_device *reg_dev = NULL;
 	struct iphdr *encap;
+	struct net *net = dev_net(skb->dev);
 
 	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
 	/*
@@ -1504,8 +1528,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
 		return 1;
 
 	read_lock(&mrt_lock);
-	if (init_net.ipv4.mroute_reg_vif_num >= 0)
-		reg_dev = init_net.ipv4.vif_table[init_net.ipv4.mroute_reg_vif_num].dev;
+	if (net->ipv4.mroute_reg_vif_num >= 0)
+		reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev;
 	if (reg_dev)
 		dev_hold(reg_dev);
 	read_unlock(&mrt_lock);
@@ -1540,13 +1564,14 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
 int pim_rcv_v1(struct sk_buff * skb)
 {
 	struct igmphdr *pim;
+	struct net *net = dev_net(skb->dev);
 
 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
 		goto drop;
 
 	pim = igmp_hdr(skb);
 
-	if (!init_net.ipv4.mroute_do_pim ||
+	if (!net->ipv4.mroute_do_pim ||
 	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
 		goto drop;
 
@@ -1586,7 +1611,8 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
 {
 	int ct;
 	struct rtnexthop *nhp;
-	struct net_device *dev = init_net.ipv4.vif_table[c->mfc_parent].dev;
+	struct net *net = mfc_net(c);
+	struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
 	u8 *b = skb_tail_pointer(skb);
 	struct rtattr *mp_head;
 
@@ -1602,7 +1628,7 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
 			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
 			nhp->rtnh_flags = 0;
 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
-			nhp->rtnh_ifindex = init_net.ipv4.vif_table[ct].dev->ifindex;
+			nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex;
 			nhp->rtnh_len = sizeof(*nhp);
 		}
 	}
@@ -1616,14 +1642,15 @@ rtattr_failure:
 	return -EMSGSIZE;
 }
 
-int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ipmr_get_route(struct net *net,
+		   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 {
 	int err;
 	struct mfc_cache *cache;
 	struct rtable *rt = skb->rtable;
 
 	read_lock(&mrt_lock);
-	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
+	cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
 
 	if (cache == NULL) {
 		struct sk_buff *skb2;
@@ -1654,7 +1681,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 		iph->saddr = rt->rt_src;
 		iph->daddr = rt->rt_dst;
 		iph->version = 0;
-		err = ipmr_cache_unresolved(vif, skb2);
+		err = ipmr_cache_unresolved(net, vif, skb2);
 		read_unlock(&mrt_lock);
 		return err;
 	}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 97f71153584f..6a9e204c8024 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2779,7 +2779,8 @@ int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
 	return ip_route_output_flow(net, rp, flp, NULL, 0);
 }
 
-static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
+static int rt_fill_info(struct net *net,
+			struct sk_buff *skb, u32 pid, u32 seq, int event,
 			int nowait, unsigned int flags)
 {
 	struct rtable *rt = skb->rtable;
@@ -2844,8 +2845,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 		__be32 dst = rt->rt_dst;
 
 		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
-		    IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) {
-			int err = ipmr_get_route(skb, r, nowait);
+		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+			int err = ipmr_get_route(net, skb, r, nowait);
 			if (err <= 0) {
 				if (!nowait) {
 					if (err == 0)
@@ -2950,7 +2951,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 	if (rtm->rtm_flags & RTM_F_NOTIFY)
 		rt->rt_flags |= RTCF_NOTIFY;
 
-	err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+	err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
 			   RTM_NEWROUTE, 0, 0);
 	if (err <= 0)
 		goto errout_free;
@@ -2988,7 +2989,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		if (rt_is_expired(rt))
 			continue;
 		skb->dst = dst_clone(&rt->u.dst);
-		if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
+		if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
 				 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
 				 1, NLM_F_MULTI) <= 0) {
 			dst_release(xchg(&skb->dst, NULL));