path: root/net/ipv6/ip6mr.c
author		Patrick McHardy <kaber@trash.net>	2010-05-11 08:40:53 -0400
committer	Patrick McHardy <kaber@trash.net>	2010-05-11 08:40:53 -0400
commit		6bd521433942d85e80f7a731a88cc91a327f38e0
tree		e263e39fc6096ef77e2a1c22d5972447785b2aa5 /net/ipv6/ip6mr.c
parent		f30a77842129b5656360cc1f5db48a3fcfb64528
ipv6: ip6mr: move mroute data into separate structure
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/ipv6/ip6mr.c')
-rw-r--r--	net/ipv6/ip6mr.c	390
1 file changed, 214 insertions, 176 deletions
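The patch gathers the per-namespace IPv6 multicast-routing state (mroute socket, vif table, MFC cache array, unresolved queue, expiry timer, assert/PIM flags) into a single struct mr6_table reached through net->ipv6.mrt6, and the internal helpers now take the table instead of struct net. A minimal sketch of that access-pattern change, using the field names from the hunks below (the mif_exists_* wrappers are hypothetical, for illustration only):

	/* Before: each piece of mroute state is a separate field of struct netns_ipv6. */
	static bool mif_exists_old(struct net *net, int vifi)
	{
		return net->ipv6.vif6_table[vifi].dev != NULL;	/* old MIF_EXISTS(net, vifi) */
	}

	/* After: the state lives in one mr6_table hung off the namespace; helpers take
	 * the table itself, so callers resolve net->ipv6.mrt6 once up front. */
	static bool mif_exists_new(struct net *net, int vifi)
	{
		struct mr6_table *mrt = net->ipv6.mrt6;

		return mrt->vif6_table[vifi].dev != NULL;	/* new MIF_EXISTS(mrt, vifi) */
	}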
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 08e09042ad1c..9419fceeed41 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -51,6 +51,24 @@
 #include <linux/netfilter_ipv6.h>
 #include <net/ip6_checksum.h>
 
+struct mr6_table {
+#ifdef CONFIG_NET_NS
+	struct net		*net;
+#endif
+	struct sock		*mroute6_sk;
+	struct timer_list	ipmr_expire_timer;
+	struct list_head	mfc6_unres_queue;
+	struct list_head	mfc6_cache_array[MFC6_LINES];
+	struct mif_device	vif6_table[MAXMIFS];
+	int			maxvif;
+	atomic_t		cache_resolve_queue_len;
+	int			mroute_do_assert;
+	int			mroute_do_pim;
+#ifdef CONFIG_IPV6_PIMSM_V2
+	int			mroute_reg_vif_num;
+#endif
+};
+
 /* Big lock, protecting vif table, mrt cache and mroute socket state.
    Note that the changes are semaphored via rtnl_lock.
  */
@@ -61,7 +79,7 @@ static DEFINE_RWLOCK(mrt_lock);
  *	Multicast router control variables
  */
 
-#define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL)
+#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
 
 /* Special spinlock for queue of unresolved entries */
 static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -76,13 +94,13 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 
 static struct kmem_cache *mrt_cachep __read_mostly;
 
-static int ip6_mr_forward(struct net *net, struct sk_buff *skb,
-			  struct mfc6_cache *cache);
-static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt,
+static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+			  struct sk_buff *skb, struct mfc6_cache *cache);
+static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
 			      mifi_t mifi, int assert);
-static int ip6mr_fill_mroute(struct net *net, struct sk_buff *skb,
+static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 			     struct mfc6_cache *c, struct rtmsg *rtm);
-static void mroute_clean_tables(struct net *net);
+static void mroute_clean_tables(struct mr6_table *mrt);
 
 
 #ifdef CONFIG_PROC_FS
@@ -97,11 +115,12 @@ struct ipmr_mfc_iter {
 static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
 					   struct ipmr_mfc_iter *it, loff_t pos)
 {
+	struct mr6_table *mrt = net->ipv6.mrt6;
 	struct mfc6_cache *mfc;
 
 	read_lock(&mrt_lock);
 	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
-		it->cache = &net->ipv6.mfc6_cache_array[it->ct];
+		it->cache = &mrt->mfc6_cache_array[it->ct];
 		list_for_each_entry(mfc, it->cache, list)
 			if (pos-- == 0)
 				return mfc;
@@ -109,7 +128,7 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
 	read_unlock(&mrt_lock);
 
 	spin_lock_bh(&mfc_unres_lock);
-	it->cache = &net->ipv6.mfc6_unres_queue;
+	it->cache = &mrt->mfc6_unres_queue;
 	list_for_each_entry(mfc, it->cache, list)
 		if (pos-- == 0)
 			return mfc;
@@ -132,11 +151,13 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
 					    struct ipmr_vif_iter *iter,
 					    loff_t pos)
 {
-	for (iter->ct = 0; iter->ct < net->ipv6.maxvif; ++iter->ct) {
-		if (!MIF_EXISTS(net, iter->ct))
+	struct mr6_table *mrt = net->ipv6.mrt6;
+
+	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
+		if (!MIF_EXISTS(mrt, iter->ct))
 			continue;
 		if (pos-- == 0)
-			return &net->ipv6.vif6_table[iter->ct];
+			return &mrt->vif6_table[iter->ct];
 	}
 	return NULL;
 }
@@ -155,15 +176,16 @@ static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct ipmr_vif_iter *iter = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	++*pos;
 	if (v == SEQ_START_TOKEN)
 		return ip6mr_vif_seq_idx(net, iter, 0);
 
-	while (++iter->ct < net->ipv6.maxvif) {
-		if (!MIF_EXISTS(net, iter->ct))
+	while (++iter->ct < mrt->maxvif) {
+		if (!MIF_EXISTS(mrt, iter->ct))
 			continue;
-		return &net->ipv6.vif6_table[iter->ct];
+		return &mrt->vif6_table[iter->ct];
 	}
 	return NULL;
 }
@@ -177,6 +199,7 @@ static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq_file_net(seq);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -187,7 +210,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
 
 		seq_printf(seq,
 			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
-			   vif - net->ipv6.vif6_table,
+			   vif - mrt->vif6_table,
 			   name, vif->bytes_in, vif->pkt_in,
 			   vif->bytes_out, vif->pkt_out,
 			   vif->flags);
@@ -229,6 +252,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct mfc6_cache *mfc = v;
 	struct ipmr_mfc_iter *it = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	++*pos;
 
@@ -238,13 +262,13 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	if (mfc->list.next != it->cache)
 		return list_entry(mfc->list.next, struct mfc6_cache, list);
 
-	if (it->cache == &net->ipv6.mfc6_unres_queue)
+	if (it->cache == &mrt->mfc6_unres_queue)
 		goto end_of_list;
 
-	BUG_ON(it->cache != &net->ipv6.mfc6_cache_array[it->ct]);
+	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
 
 	while (++it->ct < MFC6_LINES) {
-		it->cache = &net->ipv6.mfc6_cache_array[it->ct];
+		it->cache = &mrt->mfc6_cache_array[it->ct];
 		if (list_empty(it->cache))
 			continue;
 		return list_first_entry(it->cache, struct mfc6_cache, list);
@@ -252,7 +276,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 	/* exhausted cache_array, show unresolved */
 	read_unlock(&mrt_lock);
-	it->cache = &net->ipv6.mfc6_unres_queue;
+	it->cache = &mrt->mfc6_unres_queue;
 	it->ct = 0;
 
 	spin_lock_bh(&mfc_unres_lock);
@@ -270,10 +294,11 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 {
 	struct ipmr_mfc_iter *it = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
-	if (it->cache == &net->ipv6.mfc6_unres_queue)
+	if (it->cache == &mrt->mfc6_unres_queue)
 		spin_unlock_bh(&mfc_unres_lock);
-	else if (it->cache == net->ipv6.mfc6_cache_array)
+	else if (it->cache == mrt->mfc6_cache_array)
 		read_unlock(&mrt_lock);
 }
 
@@ -281,6 +306,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 {
 	int n;
 	struct net *net = seq_file_net(seq);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -295,14 +321,14 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
 			   mfc->mf6c_parent);
 
-		if (it->cache != &net->ipv6.mfc6_unres_queue) {
+		if (it->cache != &mrt->mfc6_unres_queue) {
 			seq_printf(seq, " %8lu %8lu %8lu",
 				   mfc->mfc_un.res.pkt,
 				   mfc->mfc_un.res.bytes,
 				   mfc->mfc_un.res.wrong_if);
 			for (n = mfc->mfc_un.res.minvif;
 			     n < mfc->mfc_un.res.maxvif; n++) {
-				if (MIF_EXISTS(net, n) &&
+				if (MIF_EXISTS(mrt, n) &&
 				    mfc->mfc_un.res.ttls[n] < 255)
 					seq_printf(seq,
 						   " %2d:%-3d",
@@ -349,7 +375,8 @@ static int pim6_rcv(struct sk_buff *skb)
 	struct ipv6hdr *encap;
 	struct net_device *reg_dev = NULL;
 	struct net *net = dev_net(skb->dev);
-	int reg_vif_num = net->ipv6.mroute_reg_vif_num;
+	struct mr6_table *mrt = net->ipv6.mrt6;
+	int reg_vif_num = mrt->mroute_reg_vif_num;
 
 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
 		goto drop;
@@ -374,7 +401,7 @@ static int pim6_rcv(struct sk_buff *skb)
 
 	read_lock(&mrt_lock);
 	if (reg_vif_num >= 0)
-		reg_dev = net->ipv6.vif6_table[reg_vif_num].dev;
+		reg_dev = mrt->vif6_table[reg_vif_num].dev;
 	if (reg_dev)
 		dev_hold(reg_dev);
 	read_unlock(&mrt_lock);
@@ -411,12 +438,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 				      struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	read_lock(&mrt_lock);
 	dev->stats.tx_bytes += skb->len;
 	dev->stats.tx_packets++;
-	ip6mr_cache_report(net, skb, net->ipv6.mroute_reg_vif_num,
-			   MRT6MSG_WHOLEPKT);
+	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
 	read_unlock(&mrt_lock);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
@@ -472,15 +499,16 @@ failure:
  *	Delete a VIF entry
  */
 
-static int mif6_delete(struct net *net, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
 {
 	struct mif_device *v;
 	struct net_device *dev;
 	struct inet6_dev *in6_dev;
-	if (vifi < 0 || vifi >= net->ipv6.maxvif)
+
+	if (vifi < 0 || vifi >= mrt->maxvif)
 		return -EADDRNOTAVAIL;
 
-	v = &net->ipv6.vif6_table[vifi];
+	v = &mrt->vif6_table[vifi];
 
 	write_lock_bh(&mrt_lock);
 	dev = v->dev;
@@ -492,17 +520,17 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head)
 	}
 
 #ifdef CONFIG_IPV6_PIMSM_V2
-	if (vifi == net->ipv6.mroute_reg_vif_num)
-		net->ipv6.mroute_reg_vif_num = -1;
+	if (vifi == mrt->mroute_reg_vif_num)
+		mrt->mroute_reg_vif_num = -1;
 #endif
 
-	if (vifi + 1 == net->ipv6.maxvif) {
+	if (vifi + 1 == mrt->maxvif) {
 		int tmp;
 		for (tmp = vifi - 1; tmp >= 0; tmp--) {
-			if (MIF_EXISTS(net, tmp))
+			if (MIF_EXISTS(mrt, tmp))
 				break;
 		}
-		net->ipv6.maxvif = tmp + 1;
+		mrt->maxvif = tmp + 1;
 	}
 
 	write_unlock_bh(&mrt_lock);
@@ -529,11 +557,12 @@ static inline void ip6mr_cache_free(struct mfc6_cache *c)
    and reporting error to netlink readers.
  */
 
-static void ip6mr_destroy_unres(struct net *net, struct mfc6_cache *c)
+static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
 {
+	struct net *net = read_pnet(&mrt->net);
 	struct sk_buff *skb;
 
-	atomic_dec(&net->ipv6.cache_resolve_queue_len);
+	atomic_dec(&mrt->cache_resolve_queue_len);
 
 	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
 		if (ipv6_hdr(skb)->version == 0) {
@@ -553,13 +582,13 @@ static void ip6mr_destroy_unres(struct net *net, struct mfc6_cache *c)
 
 /* Timer process for all the unresolved queue. */
 
-static void ipmr_do_expire_process(struct net *net)
+static void ipmr_do_expire_process(struct mr6_table *mrt)
 {
 	unsigned long now = jiffies;
 	unsigned long expires = 10 * HZ;
 	struct mfc6_cache *c, *next;
 
-	list_for_each_entry_safe(c, next, &net->ipv6.mfc6_unres_queue, list) {
+	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
 		if (time_after(c->mfc_un.unres.expires, now)) {
 			/* not yet... */
 			unsigned long interval = c->mfc_un.unres.expires - now;
@@ -569,31 +598,31 @@ static void ipmr_do_expire_process(struct net *net)
 		}
 
 		list_del(&c->list);
-		ip6mr_destroy_unres(net, c);
+		ip6mr_destroy_unres(mrt, c);
 	}
 
-	if (!list_empty(&net->ipv6.mfc6_unres_queue))
-		mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + expires);
+	if (!list_empty(&mrt->mfc6_unres_queue))
+		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 }
 
 static void ipmr_expire_process(unsigned long arg)
 {
-	struct net *net = (struct net *)arg;
+	struct mr6_table *mrt = (struct mr6_table *)arg;
 
 	if (!spin_trylock(&mfc_unres_lock)) {
-		mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + 1);
+		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
 		return;
 	}
 
-	if (!list_empty(&net->ipv6.mfc6_unres_queue))
-		ipmr_do_expire_process(net);
+	if (!list_empty(&mrt->mfc6_unres_queue))
+		ipmr_do_expire_process(mrt);
 
 	spin_unlock(&mfc_unres_lock);
 }
 
 /* Fill oifs list. It is called under write locked mrt_lock. */
 
-static void ip6mr_update_thresholds(struct net *net, struct mfc6_cache *cache,
+static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
 				    unsigned char *ttls)
 {
 	int vifi;
@@ -602,8 +631,8 @@ static void ip6mr_update_thresholds(struct net *net, struct mfc6_cache *cache,
 	cache->mfc_un.res.maxvif = 0;
 	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
 
-	for (vifi = 0; vifi < net->ipv6.maxvif; vifi++) {
-		if (MIF_EXISTS(net, vifi) &&
+	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
+		if (MIF_EXISTS(mrt, vifi) &&
 		    ttls[vifi] && ttls[vifi] < 255) {
 			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
 			if (cache->mfc_un.res.minvif > vifi)
@@ -614,16 +643,17 @@ static void ip6mr_update_thresholds(struct net *net, struct mfc6_cache *cache,
 	}
 }
 
-static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
+static int mif6_add(struct net *net, struct mr6_table *mrt,
+		    struct mif6ctl *vifc, int mrtsock)
 {
 	int vifi = vifc->mif6c_mifi;
-	struct mif_device *v = &net->ipv6.vif6_table[vifi];
+	struct mif_device *v = &mrt->vif6_table[vifi];
 	struct net_device *dev;
 	struct inet6_dev *in6_dev;
 	int err;
 
 	/* Is vif busy ? */
-	if (MIF_EXISTS(net, vifi))
+	if (MIF_EXISTS(mrt, vifi))
 		return -EADDRINUSE;
 
 	switch (vifc->mif6c_flags) {
@@ -633,7 +663,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
 		 * Special Purpose VIF in PIM
 		 * All the packets will be sent to the daemon
 		 */
-		if (net->ipv6.mroute_reg_vif_num >= 0)
+		if (mrt->mroute_reg_vif_num >= 0)
 			return -EADDRINUSE;
 		dev = ip6mr_reg_vif(net);
 		if (!dev)
@@ -685,22 +715,22 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
 	v->dev = dev;
 #ifdef CONFIG_IPV6_PIMSM_V2
 	if (v->flags & MIFF_REGISTER)
-		net->ipv6.mroute_reg_vif_num = vifi;
+		mrt->mroute_reg_vif_num = vifi;
 #endif
-	if (vifi + 1 > net->ipv6.maxvif)
-		net->ipv6.maxvif = vifi + 1;
+	if (vifi + 1 > mrt->maxvif)
+		mrt->maxvif = vifi + 1;
 	write_unlock_bh(&mrt_lock);
 	return 0;
 }
 
-static struct mfc6_cache *ip6mr_cache_find(struct net *net,
+static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
 					   struct in6_addr *origin,
 					   struct in6_addr *mcastgrp)
 {
 	int line = MFC6_HASH(mcastgrp, origin);
 	struct mfc6_cache *c;
 
-	list_for_each_entry(c, &net->ipv6.mfc6_cache_array[line], list) {
+	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
 		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
 		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
 			return c;
@@ -734,8 +764,8 @@ static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
  *	A cache entry has gone into a resolved state from queued
  */
 
-static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc,
-				struct mfc6_cache *c)
+static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
+				struct mfc6_cache *uc, struct mfc6_cache *c)
 {
 	struct sk_buff *skb;
 
@@ -748,7 +778,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc,
 			int err;
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
 
-			if (ip6mr_fill_mroute(net, skb, c, NLMSG_DATA(nlh)) > 0) {
+			if (ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
 				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
 			} else {
 				nlh->nlmsg_type = NLMSG_ERROR;
@@ -758,7 +788,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc,
 			}
 			err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
 		} else
-			ip6_mr_forward(net, skb, c);
+			ip6_mr_forward(net, mrt, skb, c);
 	}
 }
 
@@ -769,8 +799,8 @@ static void ip6mr_cache_resolve(struct net *net, struct mfc6_cache *uc,
  *	Called under mrt_lock.
  */
 
-static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
-			      int assert)
+static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
+			      mifi_t mifi, int assert)
 {
 	struct sk_buff *skb;
 	struct mrt6msg *msg;
@@ -806,7 +836,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
 		msg = (struct mrt6msg *)skb_transport_header(skb);
 		msg->im6_mbz = 0;
 		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
-		msg->im6_mif = net->ipv6.mroute_reg_vif_num;
+		msg->im6_mif = mrt->mroute_reg_vif_num;
 		msg->im6_pad = 0;
 		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
 		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
@@ -841,7 +871,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 
-	if (net->ipv6.mroute6_sk == NULL) {
+	if (mrt->mroute6_sk == NULL) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -849,7 +879,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
 	/*
 	 *	Deliver to user space multicast routing algorithms
 	 */
-	ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb);
+	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
 	if (ret < 0) {
 		if (net_ratelimit())
 			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
@@ -864,14 +894,14 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
  */
 
 static int
-ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
+ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
 {
 	bool found = false;
 	int err;
 	struct mfc6_cache *c;
 
 	spin_lock_bh(&mfc_unres_lock);
-	list_for_each_entry(c, &net->ipv6.mfc6_unres_queue, list) {
+	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
 		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
 		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
 			found = true;
@@ -884,7 +914,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
 	 *	Create a new entry if allowable
 	 */
 
-	if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 ||
+	if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
 	    (c = ip6mr_cache_alloc_unres()) == NULL) {
 		spin_unlock_bh(&mfc_unres_lock);
 
@@ -902,7 +932,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
 		/*
 		 *	Reflect first query at pim6sd
 		 */
-		err = ip6mr_cache_report(net, skb, mifi, MRT6MSG_NOCACHE);
+		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
 		if (err < 0) {
 			/* If the report failed throw the cache entry
 			   out - Brad Parker
@@ -914,10 +944,10 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
 			return err;
 		}
 
-		atomic_inc(&net->ipv6.cache_resolve_queue_len);
-		list_add(&c->list, &net->ipv6.mfc6_unres_queue);
+		atomic_inc(&mrt->cache_resolve_queue_len);
+		list_add(&c->list, &mrt->mfc6_unres_queue);
 
-		ipmr_do_expire_process(net);
+		ipmr_do_expire_process(mrt);
 	}
 
 	/*
@@ -939,14 +969,14 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
  *	MFC6 cache manipulation by user space
  */
 
-static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc)
+static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
 {
 	int line;
 	struct mfc6_cache *c, *next;
 
 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
 
-	list_for_each_entry_safe(c, next, &net->ipv6.mfc6_cache_array[line], list) {
+	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
 		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
 			write_lock_bh(&mrt_lock);
@@ -965,6 +995,7 @@ static int ip6mr_device_event(struct notifier_block *this,
 {
 	struct net_device *dev = ptr;
 	struct net *net = dev_net(dev);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 	struct mif_device *v;
 	int ct;
 	LIST_HEAD(list);
@@ -972,10 +1003,10 @@ static int ip6mr_device_event(struct notifier_block *this,
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
 
-	v = &net->ipv6.vif6_table[0];
-	for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) {
+	v = &mrt->vif6_table[0];
+	for (ct = 0; ct < mrt->maxvif; ct++, v++) {
 		if (v->dev == dev)
-			mif6_delete(net, ct, &list);
+			mif6_delete(mrt, ct, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -992,35 +1023,28 @@ static struct notifier_block ip6_mr_notifier = {
 
 static int __net_init ip6mr_net_init(struct net *net)
 {
+	struct mr6_table *mrt;
 	unsigned int i;
 	int err = 0;
 
-	net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device),
-				       GFP_KERNEL);
-	if (!net->ipv6.vif6_table) {
+	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
+	if (mrt == NULL) {
 		err = -ENOMEM;
 		goto fail;
 	}
 
-	/* Forwarding cache */
-	net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES,
-					     sizeof(struct list_head),
-					     GFP_KERNEL);
-	if (!net->ipv6.mfc6_cache_array) {
-		err = -ENOMEM;
-		goto fail_mfc6_cache;
-	}
+	write_pnet(&mrt->net, net);
 
 	for (i = 0; i < MFC6_LINES; i++)
-		INIT_LIST_HEAD(&net->ipv6.mfc6_cache_array[i]);
+		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
 
-	INIT_LIST_HEAD(&net->ipv6.mfc6_unres_queue);
+	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
 
-	setup_timer(&net->ipv6.ipmr_expire_timer, ipmr_expire_process,
-		    (unsigned long)net);
+	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
+		    (unsigned long)mrt);
 
 #ifdef CONFIG_IPV6_PIMSM_V2
-	net->ipv6.mroute_reg_vif_num = -1;
+	mrt->mroute_reg_vif_num = -1;
 #endif
 
 #ifdef CONFIG_PROC_FS
@@ -1030,30 +1054,31 @@ static int __net_init ip6mr_net_init(struct net *net)
 	if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
 		goto proc_cache_fail;
 #endif
+
+	net->ipv6.mrt6 = mrt;
 	return 0;
 
 #ifdef CONFIG_PROC_FS
 proc_cache_fail:
 	proc_net_remove(net, "ip6_mr_vif");
 proc_vif_fail:
-	kfree(net->ipv6.mfc6_cache_array);
+	kfree(mrt);
 #endif
-fail_mfc6_cache:
-	kfree(net->ipv6.vif6_table);
 fail:
 	return err;
 }
 
 static void __net_exit ip6mr_net_exit(struct net *net)
 {
+	struct mr6_table *mrt = net->ipv6.mrt6;
+
 #ifdef CONFIG_PROC_FS
 	proc_net_remove(net, "ip6_mr_cache");
 	proc_net_remove(net, "ip6_mr_vif");
 #endif
-	del_timer(&net->ipv6.ipmr_expire_timer);
-	mroute_clean_tables(net);
-	kfree(net->ipv6.mfc6_cache_array);
-	kfree(net->ipv6.vif6_table);
+	del_timer(&mrt->ipmr_expire_timer);
+	mroute_clean_tables(mrt);
+	kfree(mrt);
 }
 
 static struct pernet_operations ip6mr_net_ops = {
@@ -1105,7 +1130,8 @@ void ip6_mr_cleanup(void)
 	kmem_cache_destroy(mrt_cachep);
 }
 
-static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
+static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
+			 struct mf6cctl *mfc, int mrtsock)
 {
 	bool found = false;
 	int line;
@@ -1125,7 +1151,7 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
 
 	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
 
-	list_for_each_entry(c, &net->ipv6.mfc6_cache_array[line], list) {
+	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
 		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
 		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
 			found = true;
@@ -1136,7 +1162,7 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
 	if (found) {
 		write_lock_bh(&mrt_lock);
 		c->mf6c_parent = mfc->mf6cc_parent;
-		ip6mr_update_thresholds(net, c, ttls);
+		ip6mr_update_thresholds(mrt, c, ttls);
 		if (!mrtsock)
 			c->mfc_flags |= MFC_STATIC;
 		write_unlock_bh(&mrt_lock);
@@ -1153,12 +1179,12 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
 	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
 	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
 	c->mf6c_parent = mfc->mf6cc_parent;
-	ip6mr_update_thresholds(net, c, ttls);
+	ip6mr_update_thresholds(mrt, c, ttls);
 	if (!mrtsock)
 		c->mfc_flags |= MFC_STATIC;
 
 	write_lock_bh(&mrt_lock);
-	list_add(&c->list, &net->ipv6.mfc6_cache_array[line]);
+	list_add(&c->list, &mrt->mfc6_cache_array[line]);
 	write_unlock_bh(&mrt_lock);
 
 	/*
@@ -1167,21 +1193,21 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
 	 */
 	found = false;
 	spin_lock_bh(&mfc_unres_lock);
-	list_for_each_entry(uc, &net->ipv6.mfc6_unres_queue, list) {
+	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
 		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
 		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
 			list_del(&uc->list);
-			atomic_dec(&net->ipv6.cache_resolve_queue_len);
+			atomic_dec(&mrt->cache_resolve_queue_len);
 			found = true;
 			break;
 		}
 	}
-	if (list_empty(&net->ipv6.mfc6_unres_queue))
-		del_timer(&net->ipv6.ipmr_expire_timer);
+	if (list_empty(&mrt->mfc6_unres_queue))
+		del_timer(&mrt->ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
 	if (found) {
-		ip6mr_cache_resolve(net, uc, c);
+		ip6mr_cache_resolve(net, mrt, uc, c);
 		ip6mr_cache_free(uc);
 	}
 	return 0;
@@ -1191,7 +1217,7 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
  *	Close the multicast socket, and clear the vif tables etc
  */
 
-static void mroute_clean_tables(struct net *net)
+static void mroute_clean_tables(struct mr6_table *mrt)
 {
 	int i;
 	LIST_HEAD(list);
@@ -1200,9 +1226,9 @@ static void mroute_clean_tables(struct net *net)
 	/*
 	 *	Shut down all active vif entries
 	 */
-	for (i = 0; i < net->ipv6.maxvif; i++) {
-		if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC))
-			mif6_delete(net, i, &list);
+	for (i = 0; i < mrt->maxvif; i++) {
+		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
+			mif6_delete(mrt, i, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -1210,7 +1236,7 @@ static void mroute_clean_tables(struct net *net)
 	 *	Wipe the cache
 	 */
 	for (i = 0; i < MFC6_LINES; i++) {
-		list_for_each_entry_safe(c, next, &net->ipv6.mfc6_cache_array[i], list) {
+		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
 			if (c->mfc_flags & MFC_STATIC)
 				continue;
 			write_lock_bh(&mrt_lock);
@@ -1221,25 +1247,25 @@ static void mroute_clean_tables(struct net *net)
 		}
 	}
 
-	if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) {
+	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
 		spin_lock_bh(&mfc_unres_lock);
-		list_for_each_entry_safe(c, next, &net->ipv6.mfc6_unres_queue, list) {
+		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
 			list_del(&c->list);
-			ip6mr_destroy_unres(net, c);
+			ip6mr_destroy_unres(mrt, c);
 		}
 		spin_unlock_bh(&mfc_unres_lock);
 	}
 }
 
-static int ip6mr_sk_init(struct sock *sk)
+static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
 {
 	int err = 0;
 	struct net *net = sock_net(sk);
 
 	rtnl_lock();
 	write_lock_bh(&mrt_lock);
-	if (likely(net->ipv6.mroute6_sk == NULL)) {
-		net->ipv6.mroute6_sk = sk;
+	if (likely(mrt->mroute6_sk == NULL)) {
+		mrt->mroute6_sk = sk;
 		net->ipv6.devconf_all->mc_forwarding++;
 	}
 	else
@@ -1255,15 +1281,16 @@ int ip6mr_sk_done(struct sock *sk)
 {
 	int err = 0;
 	struct net *net = sock_net(sk);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	rtnl_lock();
-	if (sk == net->ipv6.mroute6_sk) {
+	if (sk == mrt->mroute6_sk) {
 		write_lock_bh(&mrt_lock);
-		net->ipv6.mroute6_sk = NULL;
+		mrt->mroute6_sk = NULL;
 		net->ipv6.devconf_all->mc_forwarding--;
 		write_unlock_bh(&mrt_lock);
 
-		mroute_clean_tables(net);
+		mroute_clean_tables(mrt);
 	} else
 		err = -EACCES;
 	rtnl_unlock();
@@ -1271,6 +1298,13 @@ int ip6mr_sk_done(struct sock *sk)
 	return err;
 }
 
+struct sock *mroute6_socket(struct net *net)
+{
+	struct mr6_table *mrt = net->ipv6.mrt6;
+
+	return mrt->mroute6_sk;
+}
+
 /*
  *	Socket options and virtual interface manipulation. The whole
  *	virtual interface system is a complete heap, but unfortunately
@@ -1285,9 +1319,10 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 	struct mf6cctl mfc;
 	mifi_t mifi;
 	struct net *net = sock_net(sk);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	if (optname != MRT6_INIT) {
-		if (sk != net->ipv6.mroute6_sk && !capable(CAP_NET_ADMIN))
+		if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
 			return -EACCES;
 	}
 
@@ -1299,7 +1334,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 		if (optlen < sizeof(int))
 			return -EINVAL;
 
-		return ip6mr_sk_init(sk);
+		return ip6mr_sk_init(mrt, sk);
 
 	case MRT6_DONE:
 		return ip6mr_sk_done(sk);
@@ -1312,7 +1347,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 		if (vif.mif6c_mifi >= MAXMIFS)
 			return -ENFILE;
 		rtnl_lock();
-		ret = mif6_add(net, &vif, sk == net->ipv6.mroute6_sk);
+		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
 		rtnl_unlock();
 		return ret;
 
@@ -1322,7 +1357,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
 			return -EFAULT;
 		rtnl_lock();
-		ret = mif6_delete(net, mifi, NULL);
+		ret = mif6_delete(mrt, mifi, NULL);
 		rtnl_unlock();
 		return ret;
 
@@ -1338,10 +1373,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 			return -EFAULT;
 		rtnl_lock();
 		if (optname == MRT6_DEL_MFC)
-			ret = ip6mr_mfc_delete(net, &mfc);
+			ret = ip6mr_mfc_delete(mrt, &mfc);
 		else
-			ret = ip6mr_mfc_add(net, &mfc,
-					    sk == net->ipv6.mroute6_sk);
+			ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
 		rtnl_unlock();
 		return ret;
 
@@ -1353,7 +1387,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 		int v;
 		if (get_user(v, (int __user *)optval))
 			return -EFAULT;
-		net->ipv6.mroute_do_assert = !!v;
+		mrt->mroute_do_assert = !!v;
 		return 0;
 	}
 
@@ -1366,9 +1400,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 		v = !!v;
 		rtnl_lock();
 		ret = 0;
-		if (v != net->ipv6.mroute_do_pim) {
-			net->ipv6.mroute_do_pim = v;
-			net->ipv6.mroute_do_assert = v;
+		if (v != mrt->mroute_do_pim) {
+			mrt->mroute_do_pim = v;
+			mrt->mroute_do_assert = v;
 		}
 		rtnl_unlock();
 		return ret;
@@ -1394,6 +1428,7 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
 	int olr;
 	int val;
 	struct net *net = sock_net(sk);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	switch (optname) {
 	case MRT6_VERSION:
@@ -1401,11 +1436,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
 		break;
 #ifdef CONFIG_IPV6_PIMSM_V2
 	case MRT6_PIM:
-		val = net->ipv6.mroute_do_pim;
+		val = mrt->mroute_do_pim;
 		break;
 #endif
 	case MRT6_ASSERT:
-		val = net->ipv6.mroute_do_assert;
+		val = mrt->mroute_do_assert;
 		break;
 	default:
 		return -ENOPROTOOPT;
@@ -1436,16 +1471,17 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
 	struct mif_device *vif;
 	struct mfc6_cache *c;
 	struct net *net = sock_net(sk);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	switch (cmd) {
 	case SIOCGETMIFCNT_IN6:
 		if (copy_from_user(&vr, arg, sizeof(vr)))
 			return -EFAULT;
-		if (vr.mifi >= net->ipv6.maxvif)
+		if (vr.mifi >= mrt->maxvif)
 			return -EINVAL;
 		read_lock(&mrt_lock);
-		vif = &net->ipv6.vif6_table[vr.mifi];
-		if (MIF_EXISTS(net, vr.mifi)) {
+		vif = &mrt->vif6_table[vr.mifi];
+		if (MIF_EXISTS(mrt, vr.mifi)) {
 			vr.icount = vif->pkt_in;
 			vr.ocount = vif->pkt_out;
 			vr.ibytes = vif->bytes_in;
@@ -1463,7 +1499,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
 			return -EFAULT;
 
 		read_lock(&mrt_lock);
-		c = ip6mr_cache_find(net, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
 		if (c) {
 			sr.pktcnt = c->mfc_un.res.pkt;
 			sr.bytecnt = c->mfc_un.res.bytes;
@@ -1493,11 +1529,11 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
  *	Processing handlers for ip6mr_forward
  */
 
-static int ip6mr_forward2(struct net *net, struct sk_buff *skb,
-			  struct mfc6_cache *c, int vifi)
+static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
+			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
 {
 	struct ipv6hdr *ipv6h;
-	struct mif_device *vif = &net->ipv6.vif6_table[vifi];
+	struct mif_device *vif = &mrt->vif6_table[vifi];
 	struct net_device *dev;
 	struct dst_entry *dst;
 	struct flowi fl;
@@ -1511,7 +1547,7 @@ static int ip6mr_forward2(struct net *net, struct sk_buff *skb,
 		vif->bytes_out += skb->len;
 		vif->dev->stats.tx_bytes += skb->len;
 		vif->dev->stats.tx_packets++;
-		ip6mr_cache_report(net, skb, vifi, MRT6MSG_WHOLEPKT);
+		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
 		goto out_free;
 	}
 #endif
@@ -1566,19 +1602,19 @@ out_free:
 	return 0;
 }
 
-static int ip6mr_find_vif(struct net_device *dev)
+static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
 {
-	struct net *net = dev_net(dev);
 	int ct;
-	for (ct = net->ipv6.maxvif - 1; ct >= 0; ct--) {
-		if (net->ipv6.vif6_table[ct].dev == dev)
+
+	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
+		if (mrt->vif6_table[ct].dev == dev)
 			break;
 	}
 	return ct;
 }
 
-static int ip6_mr_forward(struct net *net, struct sk_buff *skb,
-			  struct mfc6_cache *cache)
+static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+			  struct sk_buff *skb, struct mfc6_cache *cache)
 {
 	int psend = -1;
 	int vif, ct;
@@ -1590,30 +1626,30 @@ static int ip6_mr_forward(struct net *net, struct sk_buff *skb,
 	/*
 	 * Wrong interface: drop packet and (maybe) send PIM assert.
 	 */
-	if (net->ipv6.vif6_table[vif].dev != skb->dev) {
+	if (mrt->vif6_table[vif].dev != skb->dev) {
 		int true_vifi;
 
 		cache->mfc_un.res.wrong_if++;
-		true_vifi = ip6mr_find_vif(skb->dev);
+		true_vifi = ip6mr_find_vif(mrt, skb->dev);
 
-		if (true_vifi >= 0 && net->ipv6.mroute_do_assert &&
+		if (true_vifi >= 0 && mrt->mroute_do_assert &&
 		    /* pimsm uses asserts, when switching from RPT to SPT,
 		       so that we cannot check that packet arrived on an oif.
 		       It is bad, but otherwise we would need to move pretty
 		       large chunk of pimd to kernel. Ough... --ANK
 		     */
-		    (net->ipv6.mroute_do_pim ||
+		    (mrt->mroute_do_pim ||
 		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
 		    time_after(jiffies,
 			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
 			cache->mfc_un.res.last_assert = jiffies;
-			ip6mr_cache_report(net, skb, true_vifi, MRT6MSG_WRONGMIF);
+			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
 		}
 		goto dont_forward;
 	}
 
-	net->ipv6.vif6_table[vif].pkt_in++;
-	net->ipv6.vif6_table[vif].bytes_in += skb->len;
+	mrt->vif6_table[vif].pkt_in++;
+	mrt->vif6_table[vif].bytes_in += skb->len;
 
 	/*
 	 *	Forward the frame
@@ -1623,13 +1659,13 @@ static int ip6_mr_forward(struct net *net, struct sk_buff *skb,
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 				if (skb2)
-					ip6mr_forward2(net, skb2, cache, psend);
+					ip6mr_forward2(net, mrt, skb2, cache, psend);
 			}
 			psend = ct;
 		}
 	}
 	if (psend != -1) {
-		ip6mr_forward2(net, skb, cache, psend);
+		ip6mr_forward2(net, mrt, skb, cache, psend);
 		return 0;
 	}
 
@@ -1647,9 +1683,10 @@ int ip6_mr_input(struct sk_buff *skb)
 {
 	struct mfc6_cache *cache;
 	struct net *net = dev_net(skb->dev);
+	struct mr6_table *mrt = net->ipv6.mrt6;
 
 	read_lock(&mrt_lock);
-	cache = ip6mr_cache_find(net,
+	cache = ip6mr_cache_find(mrt,
 				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
 
 	/*
@@ -1658,9 +1695,9 @@ int ip6_mr_input(struct sk_buff *skb)
 	if (cache == NULL) {
 		int vif;
 
-		vif = ip6mr_find_vif(skb->dev);
+		vif = ip6mr_find_vif(mrt, skb->dev);
 		if (vif >= 0) {
-			int err = ip6mr_cache_unresolved(net, vif, skb);
+			int err = ip6mr_cache_unresolved(mrt, vif, skb);
 			read_unlock(&mrt_lock);
 
 			return err;
@@ -1670,7 +1707,7 @@ int ip6_mr_input(struct sk_buff *skb)
 		return -ENODEV;
 	}
 
-	ip6_mr_forward(net, skb, cache);
+	ip6_mr_forward(net, mrt, skb, cache);
 
 	read_unlock(&mrt_lock);
 
@@ -1679,8 +1716,8 @@ int ip6_mr_input(struct sk_buff *skb)
 
 
 static int
-ip6mr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc6_cache *c,
-		  struct rtmsg *rtm)
+ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+		  struct mfc6_cache *c, struct rtmsg *rtm)
 {
 	int ct;
 	struct rtnexthop *nhp;
@@ -1691,19 +1728,19 @@ ip6mr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc6_cache *c,
 	if (c->mf6c_parent > MAXMIFS)
 		return -ENOENT;
 
-	if (MIF_EXISTS(net, c->mf6c_parent))
-		RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
+	if (MIF_EXISTS(mrt, c->mf6c_parent))
+		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
 
 	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
 
 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-		if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
+		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
 			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
 				goto rtattr_failure;
 			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
 			nhp->rtnh_flags = 0;
 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
-			nhp->rtnh_ifindex = net->ipv6.vif6_table[ct].dev->ifindex;
+			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
 			nhp->rtnh_len = sizeof(*nhp);
 		}
 	}
@@ -1721,11 +1758,12 @@ int ip6mr_get_route(struct net *net,
 		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 {
 	int err;
+	struct mr6_table *mrt = net->ipv6.mrt6;
 	struct mfc6_cache *cache;
 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
 
 	read_lock(&mrt_lock);
-	cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
+	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
 
 	if (!cache) {
 		struct sk_buff *skb2;
@@ -1739,7 +1777,7 @@ int ip6mr_get_route(struct net *net,
 		}
 
 		dev = skb->dev;
-		if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
+		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
 			read_unlock(&mrt_lock);
 			return -ENODEV;
 		}
@@ -1768,7 +1806,7 @@ int ip6mr_get_route(struct net *net,
 		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
 		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
 
-		err = ip6mr_cache_unresolved(net, vif, skb2);
+		err = ip6mr_cache_unresolved(mrt, vif, skb2);
 		read_unlock(&mrt_lock);
 
 		return err;
@@ -1777,7 +1815,7 @@ int ip6mr_get_route(struct net *net,
 	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
 		cache->mfc_flags |= MFC_NOTIFY;
 
-	err = ip6mr_fill_mroute(net, skb, cache, rtm);
+	err = ip6mr_fill_mroute(mrt, skb, cache, rtm);
 	read_unlock(&mrt_lock);
 	return err;
 }