author		Patrick McHardy <kaber@trash.net>	2010-04-13 01:03:22 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-13 17:49:34 -0400
commit		0c12295a741d3186987f96f518cfbdaf01abb087 (patch)
tree		e71d0f5e5193870318e86cd519edf728b1e2a079 /net
parent		862465f2e7e90975e7bf0ecfbb171dd3adedd950 (diff)
ipv4: ipmr: move mroute data into seperate structure
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
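
The change is mechanical: every multicast-routing field that previously lived directly in struct netns_ipv4 (vif_table, maxvif, mfc_cache_array, mfc_unres_queue, cache_resolve_queue_len, the expire timer, the mroute socket, and the PIM/assert flags) moves into the new struct mr_table, and struct netns_ipv4 keeps a single mrt pointer that ipmr_net_init() allocates with kzalloc(). A condensed before/after, lifted from the vif_delete() hunk below (a sketch for orientation, not additional patch content):

	/* before: state scattered across the namespace */
	if (vifi < 0 || vifi >= net->ipv4.maxvif)
		return -EADDRNOTAVAIL;
	v = &net->ipv4.vif_table[vifi];

	/* after: fetch the per-namespace table once, then use plain members */
	struct mr_table *mrt = net->ipv4.mrt;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;
	v = &mrt->vif_table[vifi];

Callers that start from a socket or a device recover the table the same way, via sock_net(sk)->ipv4.mrt or dev_net(dev)->ipv4.mrt, as the hunks below do.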
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/ipmr.c	369
1 files changed, 199 insertions(+), 170 deletions(-)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 21b5edc2f343..498f4e907d52 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -68,6 +68,21 @@
 #define CONFIG_IP_PIMSM	1
 #endif
 
+struct mr_table {
+	struct sock		*mroute_sk;
+	struct timer_list	ipmr_expire_timer;
+	struct list_head	mfc_unres_queue;
+	struct list_head	mfc_cache_array[MFC_LINES];
+	struct vif_device	vif_table[MAXVIFS];
+	int			maxvif;
+	atomic_t		cache_resolve_queue_len;
+	int			mroute_do_assert;
+	int			mroute_do_pim;
+#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
+	int			mroute_reg_vif_num;
+#endif
+};
+
 /* Big lock, protecting vif table, mrt cache and mroute socket state.
    Note that the changes are semaphored via rtnl_lock.
  */
@@ -78,7 +93,7 @@ static DEFINE_RWLOCK(mrt_lock);
  *	Multicast router control variables
  */
 
-#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL)
+#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
 
 /* Special spinlock for queue of unresolved entries */
 static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -93,11 +108,12 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 
 static struct kmem_cache *mrt_cachep __read_mostly;
 
-static int ip_mr_forward(struct net *net, struct sk_buff *skb,
-			 struct mfc_cache *cache, int local);
-static int ipmr_cache_report(struct net *net,
+static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+			 struct sk_buff *skb, struct mfc_cache *cache,
+			 int local);
+static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
-static int ipmr_fill_mroute(struct net *net, struct sk_buff *skb,
+static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 			    struct mfc_cache *c, struct rtmsg *rtm);
 
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
@@ -199,12 +215,12 @@ failure:
 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	read_lock(&mrt_lock);
 	dev->stats.tx_bytes += skb->len;
 	dev->stats.tx_packets++;
-	ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num,
-			  IGMPMSG_WHOLEPKT);
+	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
 	read_unlock(&mrt_lock);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
@@ -274,17 +290,17 @@ failure:
  *	@notify: Set to 1, if the caller is a notifier_call
  */
 
-static int vif_delete(struct net *net, int vifi, int notify,
+static int vif_delete(struct mr_table *mrt, int vifi, int notify,
 		      struct list_head *head)
 {
 	struct vif_device *v;
 	struct net_device *dev;
 	struct in_device *in_dev;
 
-	if (vifi < 0 || vifi >= net->ipv4.maxvif)
+	if (vifi < 0 || vifi >= mrt->maxvif)
 		return -EADDRNOTAVAIL;
 
-	v = &net->ipv4.vif_table[vifi];
+	v = &mrt->vif_table[vifi];
 
 	write_lock_bh(&mrt_lock);
 	dev = v->dev;
@@ -296,17 +312,17 @@ static int vif_delete(struct net *net, int vifi, int notify,
 	}
 
 #ifdef CONFIG_IP_PIMSM
-	if (vifi == net->ipv4.mroute_reg_vif_num)
-		net->ipv4.mroute_reg_vif_num = -1;
+	if (vifi == mrt->mroute_reg_vif_num)
+		mrt->mroute_reg_vif_num = -1;
 #endif
 
-	if (vifi+1 == net->ipv4.maxvif) {
+	if (vifi+1 == mrt->maxvif) {
 		int tmp;
 		for (tmp=vifi-1; tmp>=0; tmp--) {
-			if (VIF_EXISTS(net, tmp))
+			if (VIF_EXISTS(mrt, tmp))
 				break;
 		}
-		net->ipv4.maxvif = tmp+1;
+		mrt->maxvif = tmp+1;
 	}
 
 	write_unlock_bh(&mrt_lock);
@@ -334,12 +350,13 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
    and reporting error to netlink readers.
  */
 
-static void ipmr_destroy_unres(struct net *net, struct mfc_cache *c)
+static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 {
+	struct net *net = NULL; //mrt->net;
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
 
-	atomic_dec(&net->ipv4.cache_resolve_queue_len);
+	atomic_dec(&mrt->cache_resolve_queue_len);
 
 	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
 		if (ip_hdr(skb)->version == 0) {
@@ -364,23 +381,23 @@ static void ipmr_destroy_unres(struct net *net, struct mfc_cache *c)
 
 static void ipmr_expire_process(unsigned long arg)
 {
-	struct net *net = (struct net *)arg;
+	struct mr_table *mrt = (struct mr_table *)arg;
 	unsigned long now;
 	unsigned long expires;
 	struct mfc_cache *c, *next;
 
 	if (!spin_trylock(&mfc_unres_lock)) {
-		mod_timer(&net->ipv4.ipmr_expire_timer, jiffies+HZ/10);
+		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
 		return;
 	}
 
-	if (list_empty(&net->ipv4.mfc_unres_queue))
+	if (list_empty(&mrt->mfc_unres_queue))
 		goto out;
 
 	now = jiffies;
 	expires = 10*HZ;
 
-	list_for_each_entry_safe(c, next, &net->ipv4.mfc_unres_queue, list) {
+	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
 		if (time_after(c->mfc_un.unres.expires, now)) {
 			unsigned long interval = c->mfc_un.unres.expires - now;
 			if (interval < expires)
@@ -389,11 +406,11 @@ static void ipmr_expire_process(unsigned long arg)
 		}
 
 		list_del(&c->list);
-		ipmr_destroy_unres(net, c);
+		ipmr_destroy_unres(mrt, c);
 	}
 
-	if (!list_empty(&net->ipv4.mfc_unres_queue))
-		mod_timer(&net->ipv4.ipmr_expire_timer, jiffies + expires);
+	if (!list_empty(&mrt->mfc_unres_queue))
+		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 
 out:
 	spin_unlock(&mfc_unres_lock);
@@ -401,7 +418,7 @@ out:
 
 /* Fill oifs list. It is called under write locked mrt_lock. */
 
-static void ipmr_update_thresholds(struct net *net, struct mfc_cache *cache,
+static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
 				   unsigned char *ttls)
 {
 	int vifi;
@@ -410,8 +427,8 @@ static void ipmr_update_thresholds(struct net *net, struct mfc_cache *cache,
 	cache->mfc_un.res.maxvif = 0;
 	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
 
-	for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) {
-		if (VIF_EXISTS(net, vifi) &&
+	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
+		if (VIF_EXISTS(mrt, vifi) &&
 		    ttls[vifi] && ttls[vifi] < 255) {
 			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
 			if (cache->mfc_un.res.minvif > vifi)
@@ -422,16 +439,17 @@ static void ipmr_update_thresholds(struct net *net, struct mfc_cache *cache,
 	}
 }
 
-static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
+static int vif_add(struct net *net, struct mr_table *mrt,
+		   struct vifctl *vifc, int mrtsock)
 {
 	int vifi = vifc->vifc_vifi;
-	struct vif_device *v = &net->ipv4.vif_table[vifi];
+	struct vif_device *v = &mrt->vif_table[vifi];
 	struct net_device *dev;
 	struct in_device *in_dev;
 	int err;
 
 	/* Is vif busy ? */
-	if (VIF_EXISTS(net, vifi))
+	if (VIF_EXISTS(mrt, vifi))
 		return -EADDRINUSE;
 
 	switch (vifc->vifc_flags) {
@@ -441,7 +459,7 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
 		 * Special Purpose VIF in PIM
 		 * All the packets will be sent to the daemon
 		 */
-		if (net->ipv4.mroute_reg_vif_num >= 0)
+		if (mrt->mroute_reg_vif_num >= 0)
 			return -EADDRINUSE;
 		dev = ipmr_reg_vif(net);
 		if (!dev)
@@ -519,22 +537,22 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
 	v->dev = dev;
 #ifdef CONFIG_IP_PIMSM
 	if (v->flags&VIFF_REGISTER)
-		net->ipv4.mroute_reg_vif_num = vifi;
+		mrt->mroute_reg_vif_num = vifi;
 #endif
-	if (vifi+1 > net->ipv4.maxvif)
-		net->ipv4.maxvif = vifi+1;
+	if (vifi+1 > mrt->maxvif)
+		mrt->maxvif = vifi+1;
 	write_unlock_bh(&mrt_lock);
 	return 0;
 }
 
-static struct mfc_cache *ipmr_cache_find(struct net *net,
+static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
 					 __be32 origin,
 					 __be32 mcastgrp)
 {
 	int line = MFC_HASH(mcastgrp, origin);
 	struct mfc_cache *c;
 
-	list_for_each_entry(c, &net->ipv4.mfc_cache_array[line], list) {
+	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
 		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
 			return c;
 	}
@@ -567,8 +585,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(void)
 *	A cache entry has gone into a resolved state from queued
 */
 
-static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc,
-			       struct mfc_cache *c)
+static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
+			       struct mfc_cache *uc, struct mfc_cache *c)
 {
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
@@ -581,7 +599,7 @@ static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc,
 		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
-			if (ipmr_fill_mroute(net, skb, c, NLMSG_DATA(nlh)) > 0) {
+			if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
 				nlh->nlmsg_len = (skb_tail_pointer(skb) -
 						  (u8 *)nlh);
 			} else {
@@ -595,7 +613,7 @@ static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc,
 
 			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
 		} else
-			ip_mr_forward(net, skb, c, 0);
+			ip_mr_forward(net, mrt, skb, c, 0);
 	}
 }
 
@@ -606,7 +624,7 @@ static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc,
 *	Called under mrt_lock.
 */
 
-static int ipmr_cache_report(struct net *net,
+static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert)
 {
 	struct sk_buff *skb;
@@ -639,7 +657,7 @@ static int ipmr_cache_report(struct net *net,
 		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
 		msg->im_msgtype = IGMPMSG_WHOLEPKT;
 		msg->im_mbz = 0;
-		msg->im_vif = net->ipv4.mroute_reg_vif_num;
+		msg->im_vif = mrt->mroute_reg_vif_num;
 		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
 		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
 					     sizeof(struct iphdr));
@@ -671,7 +689,7 @@ static int ipmr_cache_report(struct net *net,
 		skb->transport_header = skb->network_header;
 	}
 
-	if (net->ipv4.mroute_sk == NULL) {
+	if (mrt->mroute_sk == NULL) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -679,7 +697,7 @@ static int ipmr_cache_report(struct net *net,
 	/*
 	 *	Deliver to mrouted
 	 */
-	ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb);
+	ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
 	if (ret < 0) {
 		if (net_ratelimit())
 			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
@@ -694,7 +712,7 @@ static int ipmr_cache_report(struct net *net,
 */
 
 static int
-ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
+ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
 {
 	bool found = false;
 	int err;
@@ -702,7 +720,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 	const struct iphdr *iph = ip_hdr(skb);
 
 	spin_lock_bh(&mfc_unres_lock);
-	list_for_each_entry(c, &net->ipv4.mfc_unres_queue, list) {
+	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
 		if (c->mfc_mcastgrp == iph->daddr &&
 		    c->mfc_origin == iph->saddr) {
 			found = true;
@@ -715,7 +733,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 	 *	Create a new entry if allowable
 	 */
 
-	if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
+	if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
 	    (c = ipmr_cache_alloc_unres()) == NULL) {
 		spin_unlock_bh(&mfc_unres_lock);
 
@@ -733,7 +751,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 		/*
 		 *	Reflect first query at mrouted.
 		 */
-		err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE);
+		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
 		if (err < 0) {
 			/* If the report failed throw the cache entry
 			   out - Brad Parker
@@ -745,10 +763,10 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 			return err;
 		}
 
-		atomic_inc(&net->ipv4.cache_resolve_queue_len);
-		list_add(&c->list, &net->ipv4.mfc_unres_queue);
+		atomic_inc(&mrt->cache_resolve_queue_len);
+		list_add(&c->list, &mrt->mfc_unres_queue);
 
-		mod_timer(&net->ipv4.ipmr_expire_timer, c->mfc_un.unres.expires);
+		mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
 	}
 
 	/*
@@ -770,14 +788,14 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 *	MFC cache manipulation by user space mroute daemon
 */
 
-static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
+static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
 {
 	int line;
 	struct mfc_cache *c, *next;
 
 	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
-	list_for_each_entry_safe(c, next, &net->ipv4.mfc_cache_array[line], list) {
+	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
 		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
 			write_lock_bh(&mrt_lock);
@@ -791,7 +809,8 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
 	return -ENOENT;
 }
 
-static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
+static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
+			struct mfcctl *mfc, int mrtsock)
 {
 	bool found = false;
 	int line;
@@ -802,7 +821,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 
 	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
-	list_for_each_entry(c, &net->ipv4.mfc_cache_array[line], list) {
+	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
 		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
 			found = true;
@@ -813,7 +832,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	if (found) {
 		write_lock_bh(&mrt_lock);
 		c->mfc_parent = mfc->mfcc_parent;
-		ipmr_update_thresholds(net, c, mfc->mfcc_ttls);
+		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
 		if (!mrtsock)
 			c->mfc_flags |= MFC_STATIC;
 		write_unlock_bh(&mrt_lock);
@@ -830,12 +849,12 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	c->mfc_origin = mfc->mfcc_origin.s_addr;
 	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
 	c->mfc_parent = mfc->mfcc_parent;
-	ipmr_update_thresholds(net, c, mfc->mfcc_ttls);
+	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
 	if (!mrtsock)
 		c->mfc_flags |= MFC_STATIC;
 
 	write_lock_bh(&mrt_lock);
-	list_add(&c->list, &net->ipv4.mfc_cache_array[line]);
+	list_add(&c->list, &mrt->mfc_cache_array[line]);
 	write_unlock_bh(&mrt_lock);
 
 	/*
@@ -843,20 +862,20 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	 *	need to send on the frames and tidy up.
 	 */
 	spin_lock_bh(&mfc_unres_lock);
-	list_for_each_entry(uc, &net->ipv4.mfc_unres_queue, list) {
+	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
 		if (uc->mfc_origin == c->mfc_origin &&
 		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
 			list_del(&uc->list);
-			atomic_dec(&net->ipv4.cache_resolve_queue_len);
+			atomic_dec(&mrt->cache_resolve_queue_len);
 			break;
 		}
 	}
-	if (list_empty(&net->ipv4.mfc_unres_queue))
-		del_timer(&net->ipv4.ipmr_expire_timer);
+	if (list_empty(&mrt->mfc_unres_queue))
+		del_timer(&mrt->ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
 	if (uc) {
-		ipmr_cache_resolve(net, uc, c);
+		ipmr_cache_resolve(net, mrt, uc, c);
 		ipmr_cache_free(uc);
 	}
 	return 0;
@@ -866,7 +885,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 *	Close the multicast socket, and clear the vif tables etc
 */
 
-static void mroute_clean_tables(struct net *net)
+static void mroute_clean_tables(struct mr_table *mrt)
 {
 	int i;
 	LIST_HEAD(list);
@@ -875,9 +894,9 @@ static void mroute_clean_tables(struct net *net)
 	/*
 	 *	Shut down all active vif entries
 	 */
-	for (i = 0; i < net->ipv4.maxvif; i++) {
-		if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC))
-			vif_delete(net, i, 0, &list);
+	for (i = 0; i < mrt->maxvif; i++) {
+		if (!(mrt->vif_table[i].flags&VIFF_STATIC))
+			vif_delete(mrt, i, 0, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -885,7 +904,7 @@ static void mroute_clean_tables(struct net *net)
 	 *	Wipe the cache
 	 */
 	for (i = 0; i < MFC_LINES; i++) {
-		list_for_each_entry_safe(c, next, &net->ipv4.mfc_cache_array[i], list) {
+		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
 			if (c->mfc_flags&MFC_STATIC)
 				continue;
 			write_lock_bh(&mrt_lock);
@@ -896,11 +915,11 @@ static void mroute_clean_tables(struct net *net)
 		}
 	}
 
-	if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) {
+	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
 		spin_lock_bh(&mfc_unres_lock);
-		list_for_each_entry_safe(c, next, &net->ipv4.mfc_unres_queue, list) {
+		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
 			list_del(&c->list);
-			ipmr_destroy_unres(net, c);
+			ipmr_destroy_unres(mrt, c);
 		}
 		spin_unlock_bh(&mfc_unres_lock);
 	}
@@ -909,16 +928,17 @@ static void mroute_clean_tables(struct net *net)
 static void mrtsock_destruct(struct sock *sk)
 {
 	struct net *net = sock_net(sk);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	rtnl_lock();
-	if (sk == net->ipv4.mroute_sk) {
+	if (sk == mrt->mroute_sk) {
 		IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
 
 		write_lock_bh(&mrt_lock);
-		net->ipv4.mroute_sk = NULL;
+		mrt->mroute_sk = NULL;
 		write_unlock_bh(&mrt_lock);
 
-		mroute_clean_tables(net);
+		mroute_clean_tables(mrt);
 	}
 	rtnl_unlock();
 }
@@ -936,9 +956,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 	struct vifctl vif;
 	struct mfcctl mfc;
 	struct net *net = sock_net(sk);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	if (optname != MRT_INIT) {
-		if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
+		if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
 			return -EACCES;
 	}
 
@@ -951,7 +972,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 			return -ENOPROTOOPT;
 
 		rtnl_lock();
-		if (net->ipv4.mroute_sk) {
+		if (mrt->mroute_sk) {
 			rtnl_unlock();
 			return -EADDRINUSE;
 		}
@@ -959,7 +980,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 		ret = ip_ra_control(sk, 1, mrtsock_destruct);
 		if (ret == 0) {
 			write_lock_bh(&mrt_lock);
-			net->ipv4.mroute_sk = sk;
+			mrt->mroute_sk = sk;
 			write_unlock_bh(&mrt_lock);
 
 			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
@@ -967,7 +988,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 		rtnl_unlock();
 		return ret;
 	case MRT_DONE:
-		if (sk != net->ipv4.mroute_sk)
+		if (sk != mrt->mroute_sk)
 			return -EACCES;
 		return ip_ra_control(sk, 0, NULL);
 	case MRT_ADD_VIF:
@@ -980,9 +1001,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 			return -ENFILE;
 		rtnl_lock();
 		if (optname == MRT_ADD_VIF) {
-			ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk);
+			ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
 		} else {
-			ret = vif_delete(net, vif.vifc_vifi, 0, NULL);
+			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
 		}
 		rtnl_unlock();
 		return ret;
@@ -999,9 +1020,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 			return -EFAULT;
 		rtnl_lock();
 		if (optname == MRT_DEL_MFC)
-			ret = ipmr_mfc_delete(net, &mfc);
+			ret = ipmr_mfc_delete(mrt, &mfc);
 		else
-			ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk);
+			ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
 		rtnl_unlock();
 		return ret;
 		/*
@@ -1012,7 +1033,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 		int v;
 		if (get_user(v,(int __user *)optval))
 			return -EFAULT;
-		net->ipv4.mroute_do_assert = (v) ? 1 : 0;
+		mrt->mroute_do_assert = (v) ? 1 : 0;
 		return 0;
 	}
 #ifdef CONFIG_IP_PIMSM
@@ -1026,9 +1047,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 
 		rtnl_lock();
 		ret = 0;
-		if (v != net->ipv4.mroute_do_pim) {
-			net->ipv4.mroute_do_pim = v;
-			net->ipv4.mroute_do_assert = v;
+		if (v != mrt->mroute_do_pim) {
+			mrt->mroute_do_pim = v;
+			mrt->mroute_do_assert = v;
 		}
 		rtnl_unlock();
 		return ret;
@@ -1052,6 +1073,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
 	int olr;
 	int val;
 	struct net *net = sock_net(sk);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	if (optname != MRT_VERSION &&
 #ifdef CONFIG_IP_PIMSM
@@ -1073,10 +1095,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
 		val = 0x0305;
 #ifdef CONFIG_IP_PIMSM
 	else if (optname == MRT_PIM)
-		val = net->ipv4.mroute_do_pim;
+		val = mrt->mroute_do_pim;
 #endif
 	else
-		val = net->ipv4.mroute_do_assert;
+		val = mrt->mroute_do_assert;
 	if (copy_to_user(optval, &val, olr))
 		return -EFAULT;
 	return 0;
@@ -1093,16 +1115,17 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 	struct vif_device *vif;
 	struct mfc_cache *c;
 	struct net *net = sock_net(sk);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	switch (cmd) {
 	case SIOCGETVIFCNT:
 		if (copy_from_user(&vr, arg, sizeof(vr)))
 			return -EFAULT;
-		if (vr.vifi >= net->ipv4.maxvif)
+		if (vr.vifi >= mrt->maxvif)
 			return -EINVAL;
 		read_lock(&mrt_lock);
-		vif = &net->ipv4.vif_table[vr.vifi];
-		if (VIF_EXISTS(net, vr.vifi)) {
+		vif = &mrt->vif_table[vr.vifi];
+		if (VIF_EXISTS(mrt, vr.vifi)) {
 			vr.icount = vif->pkt_in;
 			vr.ocount = vif->pkt_out;
 			vr.ibytes = vif->bytes_in;
@@ -1120,7 +1143,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 			return -EFAULT;
 
 		read_lock(&mrt_lock);
-		c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr);
+		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
 		if (c) {
 			sr.pktcnt = c->mfc_un.res.pkt;
 			sr.bytecnt = c->mfc_un.res.bytes;
@@ -1143,16 +1166,17 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
 	struct net_device *dev = ptr;
 	struct net *net = dev_net(dev);
+	struct mr_table *mrt = net->ipv4.mrt;
 	struct vif_device *v;
 	int ct;
 	LIST_HEAD(list);
 
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
-	v = &net->ipv4.vif_table[0];
-	for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) {
+	v = &mrt->vif_table[0];
+	for (ct = 0; ct < mrt->maxvif; ct++, v++) {
 		if (v->dev == dev)
-			vif_delete(net, ct, 1, &list);
+			vif_delete(mrt, ct, 1, &list);
 	}
 	unregister_netdevice_many(&list);
 	return NOTIFY_DONE;
@@ -1211,11 +1235,11 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
 *	Processing handlers for ipmr_forward
 */
 
-static void ipmr_queue_xmit(struct net *net, struct sk_buff *skb,
-			    struct mfc_cache *c, int vifi)
+static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
+			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	struct vif_device *vif = &net->ipv4.vif_table[vifi];
+	struct vif_device *vif = &mrt->vif_table[vifi];
 	struct net_device *dev;
 	struct rtable *rt;
 	int encap = 0;
@@ -1229,7 +1253,7 @@ static void ipmr_queue_xmit(struct net *net, struct sk_buff *skb,
 		vif->bytes_out += skb->len;
 		vif->dev->stats.tx_bytes += skb->len;
 		vif->dev->stats.tx_packets++;
-		ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT);
+		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
 		goto out_free;
 	}
 #endif
@@ -1312,12 +1336,12 @@ out_free:
 	return;
 }
 
-static int ipmr_find_vif(struct net_device *dev)
+static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
 {
-	struct net *net = dev_net(dev);
 	int ct;
-	for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) {
-		if (net->ipv4.vif_table[ct].dev == dev)
+
+	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
+		if (mrt->vif_table[ct].dev == dev)
 			break;
 	}
 	return ct;
@@ -1325,8 +1349,9 @@ static int ipmr_find_vif(struct net_device *dev)
 
 /* "local" means that we should preserve one skb (for local delivery) */
 
-static int ip_mr_forward(struct net *net, struct sk_buff *skb,
-			 struct mfc_cache *cache, int local)
+static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+			 struct sk_buff *skb, struct mfc_cache *cache,
+			 int local)
 {
 	int psend = -1;
 	int vif, ct;
@@ -1338,7 +1363,7 @@ static int ip_mr_forward(struct net *net, struct sk_buff *skb,
 	/*
 	 * Wrong interface: drop packet and (maybe) send PIM assert.
 	 */
-	if (net->ipv4.vif_table[vif].dev != skb->dev) {
+	if (mrt->vif_table[vif].dev != skb->dev) {
 		int true_vifi;
 
 		if (skb_rtable(skb)->fl.iif == 0) {
@@ -1357,26 +1382,26 @@ static int ip_mr_forward(struct net *net, struct sk_buff *skb,
 		}
 
 		cache->mfc_un.res.wrong_if++;
-		true_vifi = ipmr_find_vif(skb->dev);
+		true_vifi = ipmr_find_vif(mrt, skb->dev);
 
-		if (true_vifi >= 0 && net->ipv4.mroute_do_assert &&
+		if (true_vifi >= 0 && mrt->mroute_do_assert &&
 		    /* pimsm uses asserts, when switching from RPT to SPT,
 		       so that we cannot check that packet arrived on an oif.
 		       It is bad, but otherwise we would need to move pretty
 		       large chunk of pimd to kernel. Ough... --ANK
 		     */
-		    (net->ipv4.mroute_do_pim ||
+		    (mrt->mroute_do_pim ||
 		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
 		    time_after(jiffies,
 			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
 			cache->mfc_un.res.last_assert = jiffies;
-			ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF);
+			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
 		}
 		goto dont_forward;
 	}
 
-	net->ipv4.vif_table[vif].pkt_in++;
-	net->ipv4.vif_table[vif].bytes_in += skb->len;
+	mrt->vif_table[vif].pkt_in++;
+	mrt->vif_table[vif].bytes_in += skb->len;
 
 	/*
 	 *	Forward the frame
@@ -1386,7 +1411,8 @@ static int ip_mr_forward(struct net *net, struct sk_buff *skb,
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 				if (skb2)
-					ipmr_queue_xmit(net, skb2, cache, psend);
+					ipmr_queue_xmit(net, mrt, skb2, cache,
+							psend);
 			}
 			psend = ct;
 		}
@@ -1395,9 +1421,9 @@ static int ip_mr_forward(struct net *net, struct sk_buff *skb,
 		if (local) {
 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 			if (skb2)
-				ipmr_queue_xmit(net, skb2, cache, psend);
+				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
 		} else {
-			ipmr_queue_xmit(net, skb, cache, psend);
+			ipmr_queue_xmit(net, mrt, skb, cache, psend);
 			return 0;
 		}
 	}
@@ -1417,6 +1443,7 @@ int ip_mr_input(struct sk_buff *skb)
 {
 	struct mfc_cache *cache;
 	struct net *net = dev_net(skb->dev);
+	struct mr_table *mrt = net->ipv4.mrt;
 	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
 
 	/* Packet is looped back after forward, it should not be
@@ -1437,9 +1464,9 @@ int ip_mr_input(struct sk_buff *skb)
 		   that we can forward NO IGMP messages.
 		 */
 		read_lock(&mrt_lock);
-		if (net->ipv4.mroute_sk) {
+		if (mrt->mroute_sk) {
 			nf_reset(skb);
-			raw_rcv(net->ipv4.mroute_sk, skb);
+			raw_rcv(mrt->mroute_sk, skb);
 			read_unlock(&mrt_lock);
 			return 0;
 		}
@@ -1448,7 +1475,7 @@ int ip_mr_input(struct sk_buff *skb)
 	}
 
 	read_lock(&mrt_lock);
-	cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 
 	/*
 	 *	No usable cache entry
@@ -1466,9 +1493,9 @@ int ip_mr_input(struct sk_buff *skb)
 			skb = skb2;
 		}
 
-		vif = ipmr_find_vif(skb->dev);
+		vif = ipmr_find_vif(mrt, skb->dev);
 		if (vif >= 0) {
-			int err = ipmr_cache_unresolved(net, vif, skb);
+			int err = ipmr_cache_unresolved(mrt, vif, skb);
 			read_unlock(&mrt_lock);
 
 			return err;
@@ -1478,7 +1505,7 @@ int ip_mr_input(struct sk_buff *skb)
 		return -ENODEV;
 	}
 
-	ip_mr_forward(net, skb, cache, local);
+	ip_mr_forward(net, mrt, skb, cache, local);
 
 	read_unlock(&mrt_lock);
 
@@ -1500,6 +1527,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
 	struct net_device *reg_dev = NULL;
 	struct iphdr *encap;
 	struct net *net = dev_net(skb->dev);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
 	/*
@@ -1514,8 +1542,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
 		return 1;
 
 	read_lock(&mrt_lock);
-	if (net->ipv4.mroute_reg_vif_num >= 0)
-		reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev;
+	if (mrt->mroute_reg_vif_num >= 0)
+		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
 	if (reg_dev)
 		dev_hold(reg_dev);
 	read_unlock(&mrt_lock);
@@ -1550,13 +1578,14 @@ int pim_rcv_v1(struct sk_buff * skb)
 {
 	struct igmphdr *pim;
 	struct net *net = dev_net(skb->dev);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
 		goto drop;
 
 	pim = igmp_hdr(skb);
 
-	if (!net->ipv4.mroute_do_pim ||
+	if (!mrt->mroute_do_pim ||
 	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
 		goto drop;
 
@@ -1592,7 +1621,7 @@ drop:
 #endif
 
 static int
-ipmr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc_cache *c,
+ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c,
 		 struct rtmsg *rtm)
 {
 	int ct;
@@ -1604,19 +1633,19 @@ ipmr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc_cache *c,
 	if (c->mfc_parent > MAXVIFS)
 		return -ENOENT;
 
-	if (VIF_EXISTS(net, c->mfc_parent))
-		RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex);
+	if (VIF_EXISTS(mrt, c->mfc_parent))
+		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
 
 	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
 
 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-		if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
+		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
 			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
 				goto rtattr_failure;
 			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
 			nhp->rtnh_flags = 0;
 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
-			nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex;
+			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
 			nhp->rtnh_len = sizeof(*nhp);
 		}
 	}
@@ -1634,11 +1663,12 @@ int ipmr_get_route(struct net *net,
 		   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 {
 	int err;
+	struct mr_table *mrt = net->ipv4.mrt;
 	struct mfc_cache *cache;
 	struct rtable *rt = skb_rtable(skb);
 
 	read_lock(&mrt_lock);
-	cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
+	cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
 
 	if (cache == NULL) {
 		struct sk_buff *skb2;
@@ -1652,7 +1682,7 @@ int ipmr_get_route(struct net *net,
 		}
 
 		dev = skb->dev;
-		if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
+		if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
 			read_unlock(&mrt_lock);
 			return -ENODEV;
 		}
@@ -1669,14 +1699,14 @@
 		iph->saddr = rt->rt_src;
 		iph->daddr = rt->rt_dst;
 		iph->version = 0;
-		err = ipmr_cache_unresolved(net, vif, skb2);
+		err = ipmr_cache_unresolved(mrt, vif, skb2);
 		read_unlock(&mrt_lock);
 		return err;
 	}
 
 	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
 		cache->mfc_flags |= MFC_NOTIFY;
-	err = ipmr_fill_mroute(net, skb, cache, rtm);
+	err = ipmr_fill_mroute(mrt, skb, cache, rtm);
 	read_unlock(&mrt_lock);
 	return err;
 }
@@ -1694,11 +1724,13 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
 					   struct ipmr_vif_iter *iter,
 					   loff_t pos)
 {
-	for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) {
-		if (!VIF_EXISTS(net, iter->ct))
+	struct mr_table *mrt = net->ipv4.mrt;
+
+	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
+		if (!VIF_EXISTS(mrt, iter->ct))
 			continue;
 		if (pos-- == 0)
-			return &net->ipv4.vif_table[iter->ct];
+			return &mrt->vif_table[iter->ct];
 	}
 	return NULL;
 }
@@ -1717,15 +1749,16 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct ipmr_vif_iter *iter = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	++*pos;
 	if (v == SEQ_START_TOKEN)
 		return ipmr_vif_seq_idx(net, iter, 0);
 
-	while (++iter->ct < net->ipv4.maxvif) {
-		if (!VIF_EXISTS(net, iter->ct))
+	while (++iter->ct < mrt->maxvif) {
+		if (!VIF_EXISTS(mrt, iter->ct))
 			continue;
-		return &net->ipv4.vif_table[iter->ct];
+		return &mrt->vif_table[iter->ct];
 	}
 	return NULL;
 }
@@ -1739,6 +1772,7 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -1749,7 +1783,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
 
 		seq_printf(seq,
 			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
-			   vif - net->ipv4.vif_table,
+			   vif - mrt->vif_table,
 			   name, vif->bytes_in, vif->pkt_in,
 			   vif->bytes_out, vif->pkt_out,
 			   vif->flags, vif->local, vif->remote);
@@ -1788,11 +1822,12 @@ struct ipmr_mfc_iter {
 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
 					  struct ipmr_mfc_iter *it, loff_t pos)
 {
+	struct mr_table *mrt = net->ipv4.mrt;
 	struct mfc_cache *mfc;
 
 	read_lock(&mrt_lock);
 	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
-		it->cache = &net->ipv4.mfc_cache_array[it->ct];
+		it->cache = &mrt->mfc_cache_array[it->ct];
 		list_for_each_entry(mfc, it->cache, list)
 			if (pos-- == 0)
 				return mfc;
@@ -1800,7 +1835,7 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
 	read_unlock(&mrt_lock);
 
 	spin_lock_bh(&mfc_unres_lock);
-	it->cache = &net->ipv4.mfc_unres_queue;
+	it->cache = &mrt->mfc_unres_queue;
 	list_for_each_entry(mfc, it->cache, list)
 		if (pos-- == 0)
 			return mfc;
@@ -1827,6 +1862,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct mfc_cache *mfc = v;
 	struct ipmr_mfc_iter *it = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	++*pos;
 
@@ -1836,13 +1872,13 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	if (mfc->list.next != it->cache)
 		return list_entry(mfc->list.next, struct mfc_cache, list);
 
-	if (it->cache == &net->ipv4.mfc_unres_queue)
+	if (it->cache == &mrt->mfc_unres_queue)
 		goto end_of_list;
 
-	BUG_ON(it->cache != &net->ipv4.mfc_cache_array[it->ct]);
+	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
 
 	while (++it->ct < MFC_LINES) {
-		it->cache = &net->ipv4.mfc_cache_array[it->ct];
+		it->cache = &mrt->mfc_cache_array[it->ct];
 		if (list_empty(it->cache))
 			continue;
 		return list_first_entry(it->cache, struct mfc_cache, list);
@@ -1850,7 +1886,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 	/* exhausted cache_array, show unresolved */
 	read_unlock(&mrt_lock);
-	it->cache = &net->ipv4.mfc_unres_queue;
+	it->cache = &mrt->mfc_unres_queue;
 	it->ct = 0;
 
 	spin_lock_bh(&mfc_unres_lock);
@@ -1868,10 +1904,11 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 {
 	struct ipmr_mfc_iter *it = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = net->ipv4.mrt;
 
-	if (it->cache == &net->ipv4.mfc_unres_queue)
+	if (it->cache == &mrt->mfc_unres_queue)
 		spin_unlock_bh(&mfc_unres_lock);
-	else if (it->cache == &net->ipv4.mfc_cache_array[it->ct])
+	else if (it->cache == &mrt->mfc_cache_array[it->ct])
 		read_unlock(&mrt_lock);
 }
 
@@ -1879,6 +1916,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 {
 	int n;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = net->ipv4.mrt;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -1892,14 +1930,14 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 			   (unsigned long) mfc->mfc_origin,
 			   mfc->mfc_parent);
 
-		if (it->cache != &net->ipv4.mfc_unres_queue) {
+		if (it->cache != &mrt->mfc_unres_queue) {
 			seq_printf(seq, " %8lu %8lu %8lu",
 				   mfc->mfc_un.res.pkt,
 				   mfc->mfc_un.res.bytes,
 				   mfc->mfc_un.res.wrong_if);
 			for (n = mfc->mfc_un.res.minvif;
 			     n < mfc->mfc_un.res.maxvif; n++ ) {
-				if (VIF_EXISTS(net, n) &&
+				if (VIF_EXISTS(mrt, n) &&
 				    mfc->mfc_un.res.ttls[n] < 255)
 					seq_printf(seq,
 						   " %2d:%-3d",
@@ -1951,35 +1989,27 @@ static const struct net_protocol pim_protocol = {
 */
 static int __net_init ipmr_net_init(struct net *net)
 {
+	struct mr_table *mrt;
 	unsigned int i;
 	int err = 0;
 
-	net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),
-				      GFP_KERNEL);
-	if (!net->ipv4.vif_table) {
+	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
+	if (mrt == NULL) {
 		err = -ENOMEM;
 		goto fail;
 	}
 
 	/* Forwarding cache */
-	net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
-					    sizeof(struct list_head),
-					    GFP_KERNEL);
-	if (!net->ipv4.mfc_cache_array) {
-		err = -ENOMEM;
-		goto fail_mfc_cache;
-	}
-
 	for (i = 0; i < MFC_LINES; i++)
-		INIT_LIST_HEAD(&net->ipv4.mfc_cache_array[i]);
+		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
 
-	INIT_LIST_HEAD(&net->ipv4.mfc_unres_queue);
+	INIT_LIST_HEAD(&mrt->mfc_unres_queue);
 
-	setup_timer(&net->ipv4.ipmr_expire_timer, ipmr_expire_process,
-		    (unsigned long)net);
+	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
+		    (unsigned long)mrt);
 
 #ifdef CONFIG_IP_PIMSM
-	net->ipv4.mroute_reg_vif_num = -1;
+	mrt->mroute_reg_vif_num = -1;
 #endif
 
 #ifdef CONFIG_PROC_FS
@@ -1989,16 +2019,16 @@ static int __net_init ipmr_net_init(struct net *net)
 	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
 		goto proc_cache_fail;
 #endif
+
+	net->ipv4.mrt = mrt;
 	return 0;
 
 #ifdef CONFIG_PROC_FS
 proc_cache_fail:
 	proc_net_remove(net, "ip_mr_vif");
 proc_vif_fail:
-	kfree(net->ipv4.mfc_cache_array);
+	kfree(mrt);
 #endif
-fail_mfc_cache:
-	kfree(net->ipv4.vif_table);
 fail:
 	return err;
 }
@@ -2009,8 +2039,7 @@ static void __net_exit ipmr_net_exit(struct net *net)
 	proc_net_remove(net, "ip_mr_cache");
 	proc_net_remove(net, "ip_mr_vif");
 #endif
-	kfree(net->ipv4.mfc_cache_array);
-	kfree(net->ipv4.vif_table);
+	kfree(net->ipv4.mrt);
 }
 
 static struct pernet_operations ipmr_net_ops = {
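
One consistency check worth making when reading the ipmr_net_init() hunk: the expire timer's callback argument changes meaning. ipmr_expire_process() now casts its unsigned long argument to struct mr_table *, so the timer must be armed with the table rather than the namespace. Condensed from the two hunks above (a sketch, with the callback body elided into a comment):

	static void ipmr_expire_process(unsigned long arg)
	{
		struct mr_table *mrt = (struct mr_table *)arg;

		/* walks mrt->mfc_unres_queue and destroys entries whose
		 * mfc_un.unres.expires deadline has passed, re-arming the
		 * timer if unresolved entries remain */
	}

	/* in ipmr_net_init(): the table, not the netns, is the timer argument */
	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

Passing (unsigned long)net here would make the callback reinterpret a struct net as a struct mr_table, so the cast in ipmr_expire_process() and the setup_timer() argument have to move together.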