author	David S. Miller <davem@davemloft.net>	2010-05-13 02:30:45 -0400
committer	David S. Miller <davem@davemloft.net>	2010-05-13 02:30:45 -0400
commit	bf47f4b0babe287f250dd720b41ecad9b8f7ac9b (patch)
tree	0829ef3bff455f98155b5cb0e91b71db36de3510 /net
parent	0b9715e64f7d46a9620d4d5042a5e28f5595ed54 (diff)
parent	5b285cac3570a935aaa28312c1ea28f9e01c5452 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/ipmr-2.6
Diffstat (limited to 'net')
-rw-r--r--	net/ipv6/Kconfig	14
-rw-r--r--	net/ipv6/ip6_output.c	2
-rw-r--r--	net/ipv6/ip6mr.c	932
3 files changed, 683 insertions, 265 deletions
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index a578096152ab..36d7437ac054 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -229,6 +229,20 @@ config IPV6_MROUTE | |||
229 | Experimental support for IPv6 multicast forwarding. | 229 | Experimental support for IPv6 multicast forwarding. |
230 | If unsure, say N. | 230 | If unsure, say N. |
231 | 231 | ||
232 | config IPV6_MROUTE_MULTIPLE_TABLES | ||
233 | bool "IPv6: multicast policy routing" | ||
234 | depends on IPV6_MROUTE | ||
235 | select FIB_RULES | ||
236 | help | ||
237 | Normally, a multicast router runs a userspace daemon and decides | ||
238 | what to do with a multicast packet based on the source and | ||
239 | destination addresses. If you say Y here, the multicast router | ||
240 | will also be able to take interfaces and packet marks into | ||
241 | account and run multiple instances of userspace daemons | ||
242 | simultaneously, each one handling a single table. | ||
243 | |||
244 | If unsure, say N. | ||
245 | |||
232 | config IPV6_PIMSM_V2 | 246 | config IPV6_PIMSM_V2 |
233 | bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)" | 247 | bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)" |
234 | depends on IPV6_MROUTE | 248 | depends on IPV6_MROUTE |
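The help text above describes the intended workflow: several routing daemons, each owning one table, selected by interface and packet mark. How a daemon picks its table is not visible in this net/-limited diff; the sketch below assumes the MRT6_TABLE socket option introduced alongside this change (both the option name and the "set it before MRT6_INIT" ordering are assumptions here, mirroring IPv4's MRT_TABLE behaviour).

```c
/* Userspace sketch: bind a multicast routing daemon to its own table.
 * Assumptions: MRT6_TABLE exists (added outside the net/ hunks shown here)
 * and must be set before MRT6_INIT, as with IPv4's MRT_TABLE. */
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

int open_mrt6_socket(uint32_t table_id)
{
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	int on = 1;

	if (fd < 0)
		return -1;
	/* Pick the table this daemon will serve (default: RT6_TABLE_DFLT). */
	if (setsockopt(fd, IPPROTO_IPV6, MRT6_TABLE, &table_id, sizeof(table_id)) < 0)
		return -1;
	/* Register as the multicast routing daemon for that table. */
	if (setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on)) < 0)
		return -1;
	return fd;
}
```

A fib rule matching, say, a firewall mark then steers packets to that table, just as policy routing does for unicast.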
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5173acaeb501..cd963f64e27c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -108,7 +108,7 @@ static int ip6_finish_output2(struct sk_buff *skb) | |||
108 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); | 108 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); |
109 | 109 | ||
110 | if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && | 110 | if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && |
111 | ((mroute6_socket(dev_net(dev)) && | 111 | ((mroute6_socket(dev_net(dev), skb) && |
112 | !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || | 112 | !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || |
113 | ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, | 113 | ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, |
114 | &ipv6_hdr(skb)->saddr))) { | 114 | &ipv6_hdr(skb)->saddr))) { |
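The only change to ip6_output.c is that mroute6_socket() now also takes the skb: with multiple tables there is no single per-namespace mroute socket any more, so the helper must first find the table that owns this particular packet before it can report whether a daemon is listening. The helper itself lives outside the net/ hunks shown on this page; a plausible shape, consistent with the ip6mr_fib_lookup() added in the ip6mr.c hunks below, is:

```c
/* Sketch (assumed shape, not part of this diff): resolve the mroute socket
 * of the table that this packet maps to. */
struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi fl = {
		.iif	= skb->skb_iif,
		.oif	= skb->dev->ifindex,
		.mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}
```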
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index e0b530ca394c..163850e22b11 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/if_arp.h> | 42 | #include <linux/if_arp.h> |
43 | #include <net/checksum.h> | 43 | #include <net/checksum.h> |
44 | #include <net/netlink.h> | 44 | #include <net/netlink.h> |
45 | #include <net/fib_rules.h> | ||
45 | 46 | ||
46 | #include <net/ipv6.h> | 47 | #include <net/ipv6.h> |
47 | #include <net/ip6_route.h> | 48 | #include <net/ip6_route.h> |
@@ -51,6 +52,34 @@ | |||
51 | #include <linux/netfilter_ipv6.h> | 52 | #include <linux/netfilter_ipv6.h> |
52 | #include <net/ip6_checksum.h> | 53 | #include <net/ip6_checksum.h> |
53 | 54 | ||
55 | struct mr6_table { | ||
56 | struct list_head list; | ||
57 | #ifdef CONFIG_NET_NS | ||
58 | struct net *net; | ||
59 | #endif | ||
60 | u32 id; | ||
61 | struct sock *mroute6_sk; | ||
62 | struct timer_list ipmr_expire_timer; | ||
63 | struct list_head mfc6_unres_queue; | ||
64 | struct list_head mfc6_cache_array[MFC6_LINES]; | ||
65 | struct mif_device vif6_table[MAXMIFS]; | ||
66 | int maxvif; | ||
67 | atomic_t cache_resolve_queue_len; | ||
68 | int mroute_do_assert; | ||
69 | int mroute_do_pim; | ||
70 | #ifdef CONFIG_IPV6_PIMSM_V2 | ||
71 | int mroute_reg_vif_num; | ||
72 | #endif | ||
73 | }; | ||
74 | |||
75 | struct ip6mr_rule { | ||
76 | struct fib_rule common; | ||
77 | }; | ||
78 | |||
79 | struct ip6mr_result { | ||
80 | struct mr6_table *mrt; | ||
81 | }; | ||
82 | |||
54 | /* Big lock, protecting vif table, mrt cache and mroute socket state. | 83 | /* Big lock, protecting vif table, mrt cache and mroute socket state. |
55 | Note that the changes are semaphored via rtnl_lock. | 84 | Note that the changes are semaphored via rtnl_lock. |
56 | */ | 85 | */ |
@@ -61,9 +90,7 @@ static DEFINE_RWLOCK(mrt_lock); | |||
61 | * Multicast router control variables | 90 | * Multicast router control variables |
62 | */ | 91 | */ |
63 | 92 | ||
64 | #define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL) | 93 | #define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL) |
65 | |||
66 | static struct mfc6_cache *mfc_unres_queue; /* Queue of unresolved entries */ | ||
67 | 94 | ||
68 | /* Special spinlock for queue of unresolved entries */ | 95 | /* Special spinlock for queue of unresolved entries */ |
69 | static DEFINE_SPINLOCK(mfc_unres_lock); | 96 | static DEFINE_SPINLOCK(mfc_unres_lock); |
@@ -78,20 +105,233 @@ static DEFINE_SPINLOCK(mfc_unres_lock); | |||
78 | 105 | ||
79 | static struct kmem_cache *mrt_cachep __read_mostly; | 106 | static struct kmem_cache *mrt_cachep __read_mostly; |
80 | 107 | ||
81 | static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache); | 108 | static struct mr6_table *ip6mr_new_table(struct net *net, u32 id); |
82 | static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, | 109 | static void ip6mr_free_table(struct mr6_table *mrt); |
110 | |||
111 | static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, | ||
112 | struct sk_buff *skb, struct mfc6_cache *cache); | ||
113 | static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, | ||
83 | mifi_t mifi, int assert); | 114 | mifi_t mifi, int assert); |
84 | static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm); | 115 | static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, |
85 | static void mroute_clean_tables(struct net *net); | 116 | struct mfc6_cache *c, struct rtmsg *rtm); |
117 | static int ip6mr_rtm_dumproute(struct sk_buff *skb, | ||
118 | struct netlink_callback *cb); | ||
119 | static void mroute_clean_tables(struct mr6_table *mrt); | ||
120 | static void ipmr_expire_process(unsigned long arg); | ||
121 | |||
122 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | ||
123 | #define ip6mr_for_each_table(mrt, met) \ | ||
124 | list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) | ||
125 | |||
126 | static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) | ||
127 | { | ||
128 | struct mr6_table *mrt; | ||
129 | |||
130 | ip6mr_for_each_table(mrt, net) { | ||
131 | if (mrt->id == id) | ||
132 | return mrt; | ||
133 | } | ||
134 | return NULL; | ||
135 | } | ||
136 | |||
137 | static int ip6mr_fib_lookup(struct net *net, struct flowi *flp, | ||
138 | struct mr6_table **mrt) | ||
139 | { | ||
140 | struct ip6mr_result res; | ||
141 | struct fib_lookup_arg arg = { .result = &res, }; | ||
142 | int err; | ||
143 | |||
144 | err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg); | ||
145 | if (err < 0) | ||
146 | return err; | ||
147 | *mrt = res.mrt; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp, | ||
152 | int flags, struct fib_lookup_arg *arg) | ||
153 | { | ||
154 | struct ip6mr_result *res = arg->result; | ||
155 | struct mr6_table *mrt; | ||
156 | |||
157 | switch (rule->action) { | ||
158 | case FR_ACT_TO_TBL: | ||
159 | break; | ||
160 | case FR_ACT_UNREACHABLE: | ||
161 | return -ENETUNREACH; | ||
162 | case FR_ACT_PROHIBIT: | ||
163 | return -EACCES; | ||
164 | case FR_ACT_BLACKHOLE: | ||
165 | default: | ||
166 | return -EINVAL; | ||
167 | } | ||
168 | |||
169 | mrt = ip6mr_get_table(rule->fr_net, rule->table); | ||
170 | if (mrt == NULL) | ||
171 | return -EAGAIN; | ||
172 | res->mrt = mrt; | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags) | ||
177 | { | ||
178 | return 1; | ||
179 | } | ||
180 | |||
181 | static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = { | ||
182 | FRA_GENERIC_POLICY, | ||
183 | }; | ||
184 | |||
185 | static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | ||
186 | struct fib_rule_hdr *frh, struct nlattr **tb) | ||
187 | { | ||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, | ||
192 | struct nlattr **tb) | ||
193 | { | ||
194 | return 1; | ||
195 | } | ||
196 | |||
197 | static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | ||
198 | struct fib_rule_hdr *frh) | ||
199 | { | ||
200 | frh->dst_len = 0; | ||
201 | frh->src_len = 0; | ||
202 | frh->tos = 0; | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = { | ||
207 | .family = RTNL_FAMILY_IP6MR, | ||
208 | .rule_size = sizeof(struct ip6mr_rule), | ||
209 | .addr_size = sizeof(struct in6_addr), | ||
210 | .action = ip6mr_rule_action, | ||
211 | .match = ip6mr_rule_match, | ||
212 | .configure = ip6mr_rule_configure, | ||
213 | .compare = ip6mr_rule_compare, | ||
214 | .default_pref = fib_default_rule_pref, | ||
215 | .fill = ip6mr_rule_fill, | ||
216 | .nlgroup = RTNLGRP_IPV6_RULE, | ||
217 | .policy = ip6mr_rule_policy, | ||
218 | .owner = THIS_MODULE, | ||
219 | }; | ||
86 | 220 | ||
87 | static struct timer_list ipmr_expire_timer; | 221 | static int __net_init ip6mr_rules_init(struct net *net) |
222 | { | ||
223 | struct fib_rules_ops *ops; | ||
224 | struct mr6_table *mrt; | ||
225 | int err; | ||
226 | |||
227 | ops = fib_rules_register(&ip6mr_rules_ops_template, net); | ||
228 | if (IS_ERR(ops)) | ||
229 | return PTR_ERR(ops); | ||
230 | |||
231 | INIT_LIST_HEAD(&net->ipv6.mr6_tables); | ||
232 | |||
233 | mrt = ip6mr_new_table(net, RT6_TABLE_DFLT); | ||
234 | if (mrt == NULL) { | ||
235 | err = -ENOMEM; | ||
236 | goto err1; | ||
237 | } | ||
88 | 238 | ||
239 | err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0); | ||
240 | if (err < 0) | ||
241 | goto err2; | ||
242 | |||
243 | net->ipv6.mr6_rules_ops = ops; | ||
244 | return 0; | ||
245 | |||
246 | err2: | ||
247 | kfree(mrt); | ||
248 | err1: | ||
249 | fib_rules_unregister(ops); | ||
250 | return err; | ||
251 | } | ||
252 | |||
253 | static void __net_exit ip6mr_rules_exit(struct net *net) | ||
254 | { | ||
255 | struct mr6_table *mrt, *next; | ||
256 | |||
257 | list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) | ||
258 | ip6mr_free_table(mrt); | ||
259 | fib_rules_unregister(net->ipv6.mr6_rules_ops); | ||
260 | } | ||
261 | #else | ||
262 | #define ip6mr_for_each_table(mrt, net) \ | ||
263 | for (mrt = net->ipv6.mrt6; mrt; mrt = NULL) | ||
264 | |||
265 | static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) | ||
266 | { | ||
267 | return net->ipv6.mrt6; | ||
268 | } | ||
269 | |||
270 | static int ip6mr_fib_lookup(struct net *net, struct flowi *flp, | ||
271 | struct mr6_table **mrt) | ||
272 | { | ||
273 | *mrt = net->ipv6.mrt6; | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | static int __net_init ip6mr_rules_init(struct net *net) | ||
278 | { | ||
279 | net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT); | ||
280 | return net->ipv6.mrt6 ? 0 : -ENOMEM; | ||
281 | } | ||
282 | |||
283 | static void __net_exit ip6mr_rules_exit(struct net *net) | ||
284 | { | ||
285 | ip6mr_free_table(net->ipv6.mrt6); | ||
286 | } | ||
287 | #endif | ||
288 | |||
289 | static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) | ||
290 | { | ||
291 | struct mr6_table *mrt; | ||
292 | unsigned int i; | ||
293 | |||
294 | mrt = ip6mr_get_table(net, id); | ||
295 | if (mrt != NULL) | ||
296 | return mrt; | ||
297 | |||
298 | mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); | ||
299 | if (mrt == NULL) | ||
300 | return NULL; | ||
301 | mrt->id = id; | ||
302 | write_pnet(&mrt->net, net); | ||
303 | |||
304 | /* Forwarding cache */ | ||
305 | for (i = 0; i < MFC6_LINES; i++) | ||
306 | INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]); | ||
307 | |||
308 | INIT_LIST_HEAD(&mrt->mfc6_unres_queue); | ||
309 | |||
310 | setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, | ||
311 | (unsigned long)mrt); | ||
312 | |||
313 | #ifdef CONFIG_IPV6_PIMSM_V2 | ||
314 | mrt->mroute_reg_vif_num = -1; | ||
315 | #endif | ||
316 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | ||
317 | list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables); | ||
318 | #endif | ||
319 | return mrt; | ||
320 | } | ||
321 | |||
322 | static void ip6mr_free_table(struct mr6_table *mrt) | ||
323 | { | ||
324 | del_timer(&mrt->ipmr_expire_timer); | ||
325 | mroute_clean_tables(mrt); | ||
326 | kfree(mrt); | ||
327 | } | ||
89 | 328 | ||
90 | #ifdef CONFIG_PROC_FS | 329 | #ifdef CONFIG_PROC_FS |
91 | 330 | ||
92 | struct ipmr_mfc_iter { | 331 | struct ipmr_mfc_iter { |
93 | struct seq_net_private p; | 332 | struct seq_net_private p; |
94 | struct mfc6_cache **cache; | 333 | struct mr6_table *mrt; |
334 | struct list_head *cache; | ||
95 | int ct; | 335 | int ct; |
96 | }; | 336 | }; |
97 | 337 | ||
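This hunk is the heart of the change: the per-namespace multicast state becomes a list of mr6_table instances, each with its own VIF array, MFC hash buckets, unresolved queue and expiry timer, and a small fib_rules_ops maps a packet's flow keys onto one of them. With CONFIG_IPV6_MROUTE_MULTIPLE_TABLES disabled, the #else branch keeps exactly one table (net->ipv6.mrt6), so existing setups see no behavioural change. The later hunks (pim6_rcv, reg_vif_xmit and friends) all resolve their table the same way; purely as an illustration (the function name below is made up):

```c
/* Illustration only: the lookup pattern used by callers later in this patch.
 * Rules can match the interfaces and skb->mark, so e.g. a rule on fwmark 1
 * can hand all marked multicast traffic to the daemon serving table 100. */
static struct mr6_table *ip6mr_table_for_skb(struct net *net,
					     struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi fl = {
		.iif  = skb->skb_iif,
		.mark = skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
		return NULL;	/* no matching rule, or unreachable/prohibit */
	return mrt;
}
```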
@@ -99,22 +339,22 @@ struct ipmr_mfc_iter { | |||
99 | static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, | 339 | static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, |
100 | struct ipmr_mfc_iter *it, loff_t pos) | 340 | struct ipmr_mfc_iter *it, loff_t pos) |
101 | { | 341 | { |
342 | struct mr6_table *mrt = it->mrt; | ||
102 | struct mfc6_cache *mfc; | 343 | struct mfc6_cache *mfc; |
103 | 344 | ||
104 | it->cache = net->ipv6.mfc6_cache_array; | ||
105 | read_lock(&mrt_lock); | 345 | read_lock(&mrt_lock); |
106 | for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) | 346 | for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) { |
107 | for (mfc = net->ipv6.mfc6_cache_array[it->ct]; | 347 | it->cache = &mrt->mfc6_cache_array[it->ct]; |
108 | mfc; mfc = mfc->next) | 348 | list_for_each_entry(mfc, it->cache, list) |
109 | if (pos-- == 0) | 349 | if (pos-- == 0) |
110 | return mfc; | 350 | return mfc; |
351 | } | ||
111 | read_unlock(&mrt_lock); | 352 | read_unlock(&mrt_lock); |
112 | 353 | ||
113 | it->cache = &mfc_unres_queue; | ||
114 | spin_lock_bh(&mfc_unres_lock); | 354 | spin_lock_bh(&mfc_unres_lock); |
115 | for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) | 355 | it->cache = &mrt->mfc6_unres_queue; |
116 | if (net_eq(mfc6_net(mfc), net) && | 356 | list_for_each_entry(mfc, it->cache, list) |
117 | pos-- == 0) | 357 | if (pos-- == 0) |
118 | return mfc; | 358 | return mfc; |
119 | spin_unlock_bh(&mfc_unres_lock); | 359 | spin_unlock_bh(&mfc_unres_lock); |
120 | 360 | ||
@@ -122,15 +362,13 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, | |||
122 | return NULL; | 362 | return NULL; |
123 | } | 363 | } |
124 | 364 | ||
125 | |||
126 | |||
127 | |||
128 | /* | 365 | /* |
129 | * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif | 366 | * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif |
130 | */ | 367 | */ |
131 | 368 | ||
132 | struct ipmr_vif_iter { | 369 | struct ipmr_vif_iter { |
133 | struct seq_net_private p; | 370 | struct seq_net_private p; |
371 | struct mr6_table *mrt; | ||
134 | int ct; | 372 | int ct; |
135 | }; | 373 | }; |
136 | 374 | ||
@@ -138,11 +376,13 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net, | |||
138 | struct ipmr_vif_iter *iter, | 376 | struct ipmr_vif_iter *iter, |
139 | loff_t pos) | 377 | loff_t pos) |
140 | { | 378 | { |
141 | for (iter->ct = 0; iter->ct < net->ipv6.maxvif; ++iter->ct) { | 379 | struct mr6_table *mrt = iter->mrt; |
142 | if (!MIF_EXISTS(net, iter->ct)) | 380 | |
381 | for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { | ||
382 | if (!MIF_EXISTS(mrt, iter->ct)) | ||
143 | continue; | 383 | continue; |
144 | if (pos-- == 0) | 384 | if (pos-- == 0) |
145 | return &net->ipv6.vif6_table[iter->ct]; | 385 | return &mrt->vif6_table[iter->ct]; |
146 | } | 386 | } |
147 | return NULL; | 387 | return NULL; |
148 | } | 388 | } |
@@ -150,7 +390,15 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net, | |||
150 | static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos) | 390 | static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos) |
151 | __acquires(mrt_lock) | 391 | __acquires(mrt_lock) |
152 | { | 392 | { |
393 | struct ipmr_vif_iter *iter = seq->private; | ||
153 | struct net *net = seq_file_net(seq); | 394 | struct net *net = seq_file_net(seq); |
395 | struct mr6_table *mrt; | ||
396 | |||
397 | mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); | ||
398 | if (mrt == NULL) | ||
399 | return ERR_PTR(-ENOENT); | ||
400 | |||
401 | iter->mrt = mrt; | ||
154 | 402 | ||
155 | read_lock(&mrt_lock); | 403 | read_lock(&mrt_lock); |
156 | return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1) | 404 | return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1) |
@@ -161,15 +409,16 @@ static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
161 | { | 409 | { |
162 | struct ipmr_vif_iter *iter = seq->private; | 410 | struct ipmr_vif_iter *iter = seq->private; |
163 | struct net *net = seq_file_net(seq); | 411 | struct net *net = seq_file_net(seq); |
412 | struct mr6_table *mrt = iter->mrt; | ||
164 | 413 | ||
165 | ++*pos; | 414 | ++*pos; |
166 | if (v == SEQ_START_TOKEN) | 415 | if (v == SEQ_START_TOKEN) |
167 | return ip6mr_vif_seq_idx(net, iter, 0); | 416 | return ip6mr_vif_seq_idx(net, iter, 0); |
168 | 417 | ||
169 | while (++iter->ct < net->ipv6.maxvif) { | 418 | while (++iter->ct < mrt->maxvif) { |
170 | if (!MIF_EXISTS(net, iter->ct)) | 419 | if (!MIF_EXISTS(mrt, iter->ct)) |
171 | continue; | 420 | continue; |
172 | return &net->ipv6.vif6_table[iter->ct]; | 421 | return &mrt->vif6_table[iter->ct]; |
173 | } | 422 | } |
174 | return NULL; | 423 | return NULL; |
175 | } | 424 | } |
@@ -182,7 +431,8 @@ static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v) | |||
182 | 431 | ||
183 | static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) | 432 | static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) |
184 | { | 433 | { |
185 | struct net *net = seq_file_net(seq); | 434 | struct ipmr_vif_iter *iter = seq->private; |
435 | struct mr6_table *mrt = iter->mrt; | ||
186 | 436 | ||
187 | if (v == SEQ_START_TOKEN) { | 437 | if (v == SEQ_START_TOKEN) { |
188 | seq_puts(seq, | 438 | seq_puts(seq, |
@@ -193,7 +443,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) | |||
193 | 443 | ||
194 | seq_printf(seq, | 444 | seq_printf(seq, |
195 | "%2td %-10s %8ld %7ld %8ld %7ld %05X\n", | 445 | "%2td %-10s %8ld %7ld %8ld %7ld %05X\n", |
196 | vif - net->ipv6.vif6_table, | 446 | vif - mrt->vif6_table, |
197 | name, vif->bytes_in, vif->pkt_in, | 447 | name, vif->bytes_in, vif->pkt_in, |
198 | vif->bytes_out, vif->pkt_out, | 448 | vif->bytes_out, vif->pkt_out, |
199 | vif->flags); | 449 | vif->flags); |
@@ -224,8 +474,15 @@ static const struct file_operations ip6mr_vif_fops = { | |||
224 | 474 | ||
225 | static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) | 475 | static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) |
226 | { | 476 | { |
477 | struct ipmr_mfc_iter *it = seq->private; | ||
227 | struct net *net = seq_file_net(seq); | 478 | struct net *net = seq_file_net(seq); |
479 | struct mr6_table *mrt; | ||
480 | |||
481 | mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); | ||
482 | if (mrt == NULL) | ||
483 | return ERR_PTR(-ENOENT); | ||
228 | 484 | ||
485 | it->mrt = mrt; | ||
229 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) | 486 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) |
230 | : SEQ_START_TOKEN; | 487 | : SEQ_START_TOKEN; |
231 | } | 488 | } |
@@ -235,35 +492,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
235 | struct mfc6_cache *mfc = v; | 492 | struct mfc6_cache *mfc = v; |
236 | struct ipmr_mfc_iter *it = seq->private; | 493 | struct ipmr_mfc_iter *it = seq->private; |
237 | struct net *net = seq_file_net(seq); | 494 | struct net *net = seq_file_net(seq); |
495 | struct mr6_table *mrt = it->mrt; | ||
238 | 496 | ||
239 | ++*pos; | 497 | ++*pos; |
240 | 498 | ||
241 | if (v == SEQ_START_TOKEN) | 499 | if (v == SEQ_START_TOKEN) |
242 | return ipmr_mfc_seq_idx(net, seq->private, 0); | 500 | return ipmr_mfc_seq_idx(net, seq->private, 0); |
243 | 501 | ||
244 | if (mfc->next) | 502 | if (mfc->list.next != it->cache) |
245 | return mfc->next; | 503 | return list_entry(mfc->list.next, struct mfc6_cache, list); |
246 | 504 | ||
247 | if (it->cache == &mfc_unres_queue) | 505 | if (it->cache == &mrt->mfc6_unres_queue) |
248 | goto end_of_list; | 506 | goto end_of_list; |
249 | 507 | ||
250 | BUG_ON(it->cache != net->ipv6.mfc6_cache_array); | 508 | BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]); |
251 | 509 | ||
252 | while (++it->ct < MFC6_LINES) { | 510 | while (++it->ct < MFC6_LINES) { |
253 | mfc = net->ipv6.mfc6_cache_array[it->ct]; | 511 | it->cache = &mrt->mfc6_cache_array[it->ct]; |
254 | if (mfc) | 512 | if (list_empty(it->cache)) |
255 | return mfc; | 513 | continue; |
514 | return list_first_entry(it->cache, struct mfc6_cache, list); | ||
256 | } | 515 | } |
257 | 516 | ||
258 | /* exhausted cache_array, show unresolved */ | 517 | /* exhausted cache_array, show unresolved */ |
259 | read_unlock(&mrt_lock); | 518 | read_unlock(&mrt_lock); |
260 | it->cache = &mfc_unres_queue; | 519 | it->cache = &mrt->mfc6_unres_queue; |
261 | it->ct = 0; | 520 | it->ct = 0; |
262 | 521 | ||
263 | spin_lock_bh(&mfc_unres_lock); | 522 | spin_lock_bh(&mfc_unres_lock); |
264 | mfc = mfc_unres_queue; | 523 | if (!list_empty(it->cache)) |
265 | if (mfc) | 524 | return list_first_entry(it->cache, struct mfc6_cache, list); |
266 | return mfc; | ||
267 | 525 | ||
268 | end_of_list: | 526 | end_of_list: |
269 | spin_unlock_bh(&mfc_unres_lock); | 527 | spin_unlock_bh(&mfc_unres_lock); |
@@ -275,18 +533,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
275 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) | 533 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) |
276 | { | 534 | { |
277 | struct ipmr_mfc_iter *it = seq->private; | 535 | struct ipmr_mfc_iter *it = seq->private; |
278 | struct net *net = seq_file_net(seq); | 536 | struct mr6_table *mrt = it->mrt; |
279 | 537 | ||
280 | if (it->cache == &mfc_unres_queue) | 538 | if (it->cache == &mrt->mfc6_unres_queue) |
281 | spin_unlock_bh(&mfc_unres_lock); | 539 | spin_unlock_bh(&mfc_unres_lock); |
282 | else if (it->cache == net->ipv6.mfc6_cache_array) | 540 | else if (it->cache == mrt->mfc6_cache_array) |
283 | read_unlock(&mrt_lock); | 541 | read_unlock(&mrt_lock); |
284 | } | 542 | } |
285 | 543 | ||
286 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | 544 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) |
287 | { | 545 | { |
288 | int n; | 546 | int n; |
289 | struct net *net = seq_file_net(seq); | ||
290 | 547 | ||
291 | if (v == SEQ_START_TOKEN) { | 548 | if (v == SEQ_START_TOKEN) { |
292 | seq_puts(seq, | 549 | seq_puts(seq, |
@@ -296,19 +553,20 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | |||
296 | } else { | 553 | } else { |
297 | const struct mfc6_cache *mfc = v; | 554 | const struct mfc6_cache *mfc = v; |
298 | const struct ipmr_mfc_iter *it = seq->private; | 555 | const struct ipmr_mfc_iter *it = seq->private; |
556 | struct mr6_table *mrt = it->mrt; | ||
299 | 557 | ||
300 | seq_printf(seq, "%pI6 %pI6 %-3hd", | 558 | seq_printf(seq, "%pI6 %pI6 %-3hd", |
301 | &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, | 559 | &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, |
302 | mfc->mf6c_parent); | 560 | mfc->mf6c_parent); |
303 | 561 | ||
304 | if (it->cache != &mfc_unres_queue) { | 562 | if (it->cache != &mrt->mfc6_unres_queue) { |
305 | seq_printf(seq, " %8lu %8lu %8lu", | 563 | seq_printf(seq, " %8lu %8lu %8lu", |
306 | mfc->mfc_un.res.pkt, | 564 | mfc->mfc_un.res.pkt, |
307 | mfc->mfc_un.res.bytes, | 565 | mfc->mfc_un.res.bytes, |
308 | mfc->mfc_un.res.wrong_if); | 566 | mfc->mfc_un.res.wrong_if); |
309 | for (n = mfc->mfc_un.res.minvif; | 567 | for (n = mfc->mfc_un.res.minvif; |
310 | n < mfc->mfc_un.res.maxvif; n++) { | 568 | n < mfc->mfc_un.res.maxvif; n++) { |
311 | if (MIF_EXISTS(net, n) && | 569 | if (MIF_EXISTS(mrt, n) && |
312 | mfc->mfc_un.res.ttls[n] < 255) | 570 | mfc->mfc_un.res.ttls[n] < 255) |
313 | seq_printf(seq, | 571 | seq_printf(seq, |
314 | " %2d:%-3d", | 572 | " %2d:%-3d", |
@@ -355,7 +613,12 @@ static int pim6_rcv(struct sk_buff *skb) | |||
355 | struct ipv6hdr *encap; | 613 | struct ipv6hdr *encap; |
356 | struct net_device *reg_dev = NULL; | 614 | struct net_device *reg_dev = NULL; |
357 | struct net *net = dev_net(skb->dev); | 615 | struct net *net = dev_net(skb->dev); |
358 | int reg_vif_num = net->ipv6.mroute_reg_vif_num; | 616 | struct mr6_table *mrt; |
617 | struct flowi fl = { | ||
618 | .iif = skb->dev->ifindex, | ||
619 | .mark = skb->mark, | ||
620 | }; | ||
621 | int reg_vif_num; | ||
359 | 622 | ||
360 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) | 623 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) |
361 | goto drop; | 624 | goto drop; |
@@ -378,9 +641,13 @@ static int pim6_rcv(struct sk_buff *skb) | |||
378 | ntohs(encap->payload_len) + sizeof(*pim) > skb->len) | 641 | ntohs(encap->payload_len) + sizeof(*pim) > skb->len) |
379 | goto drop; | 642 | goto drop; |
380 | 643 | ||
644 | if (ip6mr_fib_lookup(net, &fl, &mrt) < 0) | ||
645 | goto drop; | ||
646 | reg_vif_num = mrt->mroute_reg_vif_num; | ||
647 | |||
381 | read_lock(&mrt_lock); | 648 | read_lock(&mrt_lock); |
382 | if (reg_vif_num >= 0) | 649 | if (reg_vif_num >= 0) |
383 | reg_dev = net->ipv6.vif6_table[reg_vif_num].dev; | 650 | reg_dev = mrt->vif6_table[reg_vif_num].dev; |
384 | if (reg_dev) | 651 | if (reg_dev) |
385 | dev_hold(reg_dev); | 652 | dev_hold(reg_dev); |
386 | read_unlock(&mrt_lock); | 653 | read_unlock(&mrt_lock); |
@@ -417,12 +684,22 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, | |||
417 | struct net_device *dev) | 684 | struct net_device *dev) |
418 | { | 685 | { |
419 | struct net *net = dev_net(dev); | 686 | struct net *net = dev_net(dev); |
687 | struct mr6_table *mrt; | ||
688 | struct flowi fl = { | ||
689 | .oif = dev->ifindex, | ||
690 | .iif = skb->skb_iif, | ||
691 | .mark = skb->mark, | ||
692 | }; | ||
693 | int err; | ||
694 | |||
695 | err = ip6mr_fib_lookup(net, &fl, &mrt); | ||
696 | if (err < 0) | ||
697 | return err; | ||
420 | 698 | ||
421 | read_lock(&mrt_lock); | 699 | read_lock(&mrt_lock); |
422 | dev->stats.tx_bytes += skb->len; | 700 | dev->stats.tx_bytes += skb->len; |
423 | dev->stats.tx_packets++; | 701 | dev->stats.tx_packets++; |
424 | ip6mr_cache_report(net, skb, net->ipv6.mroute_reg_vif_num, | 702 | ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT); |
425 | MRT6MSG_WHOLEPKT); | ||
426 | read_unlock(&mrt_lock); | 703 | read_unlock(&mrt_lock); |
427 | kfree_skb(skb); | 704 | kfree_skb(skb); |
428 | return NETDEV_TX_OK; | 705 | return NETDEV_TX_OK; |
@@ -442,11 +719,17 @@ static void reg_vif_setup(struct net_device *dev) | |||
442 | dev->features |= NETIF_F_NETNS_LOCAL; | 719 | dev->features |= NETIF_F_NETNS_LOCAL; |
443 | } | 720 | } |
444 | 721 | ||
445 | static struct net_device *ip6mr_reg_vif(struct net *net) | 722 | static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt) |
446 | { | 723 | { |
447 | struct net_device *dev; | 724 | struct net_device *dev; |
725 | char name[IFNAMSIZ]; | ||
448 | 726 | ||
449 | dev = alloc_netdev(0, "pim6reg", reg_vif_setup); | 727 | if (mrt->id == RT6_TABLE_DFLT) |
728 | sprintf(name, "pim6reg"); | ||
729 | else | ||
730 | sprintf(name, "pim6reg%u", mrt->id); | ||
731 | |||
732 | dev = alloc_netdev(0, name, reg_vif_setup); | ||
450 | if (dev == NULL) | 733 | if (dev == NULL) |
451 | return NULL; | 734 | return NULL; |
452 | 735 | ||
@@ -478,15 +761,16 @@ failure: | |||
478 | * Delete a VIF entry | 761 | * Delete a VIF entry |
479 | */ | 762 | */ |
480 | 763 | ||
481 | static int mif6_delete(struct net *net, int vifi, struct list_head *head) | 764 | static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) |
482 | { | 765 | { |
483 | struct mif_device *v; | 766 | struct mif_device *v; |
484 | struct net_device *dev; | 767 | struct net_device *dev; |
485 | struct inet6_dev *in6_dev; | 768 | struct inet6_dev *in6_dev; |
486 | if (vifi < 0 || vifi >= net->ipv6.maxvif) | 769 | |
770 | if (vifi < 0 || vifi >= mrt->maxvif) | ||
487 | return -EADDRNOTAVAIL; | 771 | return -EADDRNOTAVAIL; |
488 | 772 | ||
489 | v = &net->ipv6.vif6_table[vifi]; | 773 | v = &mrt->vif6_table[vifi]; |
490 | 774 | ||
491 | write_lock_bh(&mrt_lock); | 775 | write_lock_bh(&mrt_lock); |
492 | dev = v->dev; | 776 | dev = v->dev; |
@@ -498,17 +782,17 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head) | |||
498 | } | 782 | } |
499 | 783 | ||
500 | #ifdef CONFIG_IPV6_PIMSM_V2 | 784 | #ifdef CONFIG_IPV6_PIMSM_V2 |
501 | if (vifi == net->ipv6.mroute_reg_vif_num) | 785 | if (vifi == mrt->mroute_reg_vif_num) |
502 | net->ipv6.mroute_reg_vif_num = -1; | 786 | mrt->mroute_reg_vif_num = -1; |
503 | #endif | 787 | #endif |
504 | 788 | ||
505 | if (vifi + 1 == net->ipv6.maxvif) { | 789 | if (vifi + 1 == mrt->maxvif) { |
506 | int tmp; | 790 | int tmp; |
507 | for (tmp = vifi - 1; tmp >= 0; tmp--) { | 791 | for (tmp = vifi - 1; tmp >= 0; tmp--) { |
508 | if (MIF_EXISTS(net, tmp)) | 792 | if (MIF_EXISTS(mrt, tmp)) |
509 | break; | 793 | break; |
510 | } | 794 | } |
511 | net->ipv6.maxvif = tmp + 1; | 795 | mrt->maxvif = tmp + 1; |
512 | } | 796 | } |
513 | 797 | ||
514 | write_unlock_bh(&mrt_lock); | 798 | write_unlock_bh(&mrt_lock); |
@@ -528,7 +812,6 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head) | |||
528 | 812 | ||
529 | static inline void ip6mr_cache_free(struct mfc6_cache *c) | 813 | static inline void ip6mr_cache_free(struct mfc6_cache *c) |
530 | { | 814 | { |
531 | release_net(mfc6_net(c)); | ||
532 | kmem_cache_free(mrt_cachep, c); | 815 | kmem_cache_free(mrt_cachep, c); |
533 | } | 816 | } |
534 | 817 | ||
@@ -536,12 +819,12 @@ static inline void ip6mr_cache_free(struct mfc6_cache *c) | |||
536 | and reporting error to netlink readers. | 819 | and reporting error to netlink readers. |
537 | */ | 820 | */ |
538 | 821 | ||
539 | static void ip6mr_destroy_unres(struct mfc6_cache *c) | 822 | static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c) |
540 | { | 823 | { |
824 | struct net *net = read_pnet(&mrt->net); | ||
541 | struct sk_buff *skb; | 825 | struct sk_buff *skb; |
542 | struct net *net = mfc6_net(c); | ||
543 | 826 | ||
544 | atomic_dec(&net->ipv6.cache_resolve_queue_len); | 827 | atomic_dec(&mrt->cache_resolve_queue_len); |
545 | 828 | ||
546 | while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { | 829 | while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { |
547 | if (ipv6_hdr(skb)->version == 0) { | 830 | if (ipv6_hdr(skb)->version == 0) { |
@@ -559,60 +842,59 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c) | |||
559 | } | 842 | } |
560 | 843 | ||
561 | 844 | ||
562 | /* Single timer process for all the unresolved queue. */ | 845 | /* Timer process for all the unresolved queue. */ |
563 | 846 | ||
564 | static void ipmr_do_expire_process(unsigned long dummy) | 847 | static void ipmr_do_expire_process(struct mr6_table *mrt) |
565 | { | 848 | { |
566 | unsigned long now = jiffies; | 849 | unsigned long now = jiffies; |
567 | unsigned long expires = 10 * HZ; | 850 | unsigned long expires = 10 * HZ; |
568 | struct mfc6_cache *c, **cp; | 851 | struct mfc6_cache *c, *next; |
569 | 852 | ||
570 | cp = &mfc_unres_queue; | 853 | list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) { |
571 | |||
572 | while ((c = *cp) != NULL) { | ||
573 | if (time_after(c->mfc_un.unres.expires, now)) { | 854 | if (time_after(c->mfc_un.unres.expires, now)) { |
574 | /* not yet... */ | 855 | /* not yet... */ |
575 | unsigned long interval = c->mfc_un.unres.expires - now; | 856 | unsigned long interval = c->mfc_un.unres.expires - now; |
576 | if (interval < expires) | 857 | if (interval < expires) |
577 | expires = interval; | 858 | expires = interval; |
578 | cp = &c->next; | ||
579 | continue; | 859 | continue; |
580 | } | 860 | } |
581 | 861 | ||
582 | *cp = c->next; | 862 | list_del(&c->list); |
583 | ip6mr_destroy_unres(c); | 863 | ip6mr_destroy_unres(mrt, c); |
584 | } | 864 | } |
585 | 865 | ||
586 | if (mfc_unres_queue != NULL) | 866 | if (!list_empty(&mrt->mfc6_unres_queue)) |
587 | mod_timer(&ipmr_expire_timer, jiffies + expires); | 867 | mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); |
588 | } | 868 | } |
589 | 869 | ||
590 | static void ipmr_expire_process(unsigned long dummy) | 870 | static void ipmr_expire_process(unsigned long arg) |
591 | { | 871 | { |
872 | struct mr6_table *mrt = (struct mr6_table *)arg; | ||
873 | |||
592 | if (!spin_trylock(&mfc_unres_lock)) { | 874 | if (!spin_trylock(&mfc_unres_lock)) { |
593 | mod_timer(&ipmr_expire_timer, jiffies + 1); | 875 | mod_timer(&mrt->ipmr_expire_timer, jiffies + 1); |
594 | return; | 876 | return; |
595 | } | 877 | } |
596 | 878 | ||
597 | if (mfc_unres_queue != NULL) | 879 | if (!list_empty(&mrt->mfc6_unres_queue)) |
598 | ipmr_do_expire_process(dummy); | 880 | ipmr_do_expire_process(mrt); |
599 | 881 | ||
600 | spin_unlock(&mfc_unres_lock); | 882 | spin_unlock(&mfc_unres_lock); |
601 | } | 883 | } |
602 | 884 | ||
603 | /* Fill oifs list. It is called under write locked mrt_lock. */ | 885 | /* Fill oifs list. It is called under write locked mrt_lock. */ |
604 | 886 | ||
605 | static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls) | 887 | static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache, |
888 | unsigned char *ttls) | ||
606 | { | 889 | { |
607 | int vifi; | 890 | int vifi; |
608 | struct net *net = mfc6_net(cache); | ||
609 | 891 | ||
610 | cache->mfc_un.res.minvif = MAXMIFS; | 892 | cache->mfc_un.res.minvif = MAXMIFS; |
611 | cache->mfc_un.res.maxvif = 0; | 893 | cache->mfc_un.res.maxvif = 0; |
612 | memset(cache->mfc_un.res.ttls, 255, MAXMIFS); | 894 | memset(cache->mfc_un.res.ttls, 255, MAXMIFS); |
613 | 895 | ||
614 | for (vifi = 0; vifi < net->ipv6.maxvif; vifi++) { | 896 | for (vifi = 0; vifi < mrt->maxvif; vifi++) { |
615 | if (MIF_EXISTS(net, vifi) && | 897 | if (MIF_EXISTS(mrt, vifi) && |
616 | ttls[vifi] && ttls[vifi] < 255) { | 898 | ttls[vifi] && ttls[vifi] < 255) { |
617 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; | 899 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; |
618 | if (cache->mfc_un.res.minvif > vifi) | 900 | if (cache->mfc_un.res.minvif > vifi) |
@@ -623,16 +905,17 @@ static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttl | |||
623 | } | 905 | } |
624 | } | 906 | } |
625 | 907 | ||
626 | static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) | 908 | static int mif6_add(struct net *net, struct mr6_table *mrt, |
909 | struct mif6ctl *vifc, int mrtsock) | ||
627 | { | 910 | { |
628 | int vifi = vifc->mif6c_mifi; | 911 | int vifi = vifc->mif6c_mifi; |
629 | struct mif_device *v = &net->ipv6.vif6_table[vifi]; | 912 | struct mif_device *v = &mrt->vif6_table[vifi]; |
630 | struct net_device *dev; | 913 | struct net_device *dev; |
631 | struct inet6_dev *in6_dev; | 914 | struct inet6_dev *in6_dev; |
632 | int err; | 915 | int err; |
633 | 916 | ||
634 | /* Is vif busy ? */ | 917 | /* Is vif busy ? */ |
635 | if (MIF_EXISTS(net, vifi)) | 918 | if (MIF_EXISTS(mrt, vifi)) |
636 | return -EADDRINUSE; | 919 | return -EADDRINUSE; |
637 | 920 | ||
638 | switch (vifc->mif6c_flags) { | 921 | switch (vifc->mif6c_flags) { |
@@ -642,9 +925,9 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) | |||
642 | * Special Purpose VIF in PIM | 925 | * Special Purpose VIF in PIM |
643 | * All the packets will be sent to the daemon | 926 | * All the packets will be sent to the daemon |
644 | */ | 927 | */ |
645 | if (net->ipv6.mroute_reg_vif_num >= 0) | 928 | if (mrt->mroute_reg_vif_num >= 0) |
646 | return -EADDRINUSE; | 929 | return -EADDRINUSE; |
647 | dev = ip6mr_reg_vif(net); | 930 | dev = ip6mr_reg_vif(net, mrt); |
648 | if (!dev) | 931 | if (!dev) |
649 | return -ENOBUFS; | 932 | return -ENOBUFS; |
650 | err = dev_set_allmulti(dev, 1); | 933 | err = dev_set_allmulti(dev, 1); |
@@ -694,50 +977,48 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock) | |||
694 | v->dev = dev; | 977 | v->dev = dev; |
695 | #ifdef CONFIG_IPV6_PIMSM_V2 | 978 | #ifdef CONFIG_IPV6_PIMSM_V2 |
696 | if (v->flags & MIFF_REGISTER) | 979 | if (v->flags & MIFF_REGISTER) |
697 | net->ipv6.mroute_reg_vif_num = vifi; | 980 | mrt->mroute_reg_vif_num = vifi; |
698 | #endif | 981 | #endif |
699 | if (vifi + 1 > net->ipv6.maxvif) | 982 | if (vifi + 1 > mrt->maxvif) |
700 | net->ipv6.maxvif = vifi + 1; | 983 | mrt->maxvif = vifi + 1; |
701 | write_unlock_bh(&mrt_lock); | 984 | write_unlock_bh(&mrt_lock); |
702 | return 0; | 985 | return 0; |
703 | } | 986 | } |
704 | 987 | ||
705 | static struct mfc6_cache *ip6mr_cache_find(struct net *net, | 988 | static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt, |
706 | struct in6_addr *origin, | 989 | struct in6_addr *origin, |
707 | struct in6_addr *mcastgrp) | 990 | struct in6_addr *mcastgrp) |
708 | { | 991 | { |
709 | int line = MFC6_HASH(mcastgrp, origin); | 992 | int line = MFC6_HASH(mcastgrp, origin); |
710 | struct mfc6_cache *c; | 993 | struct mfc6_cache *c; |
711 | 994 | ||
712 | for (c = net->ipv6.mfc6_cache_array[line]; c; c = c->next) { | 995 | list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) { |
713 | if (ipv6_addr_equal(&c->mf6c_origin, origin) && | 996 | if (ipv6_addr_equal(&c->mf6c_origin, origin) && |
714 | ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) | 997 | ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) |
715 | break; | 998 | return c; |
716 | } | 999 | } |
717 | return c; | 1000 | return NULL; |
718 | } | 1001 | } |
719 | 1002 | ||
720 | /* | 1003 | /* |
721 | * Allocate a multicast cache entry | 1004 | * Allocate a multicast cache entry |
722 | */ | 1005 | */ |
723 | static struct mfc6_cache *ip6mr_cache_alloc(struct net *net) | 1006 | static struct mfc6_cache *ip6mr_cache_alloc(void) |
724 | { | 1007 | { |
725 | struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); | 1008 | struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); |
726 | if (c == NULL) | 1009 | if (c == NULL) |
727 | return NULL; | 1010 | return NULL; |
728 | c->mfc_un.res.minvif = MAXMIFS; | 1011 | c->mfc_un.res.minvif = MAXMIFS; |
729 | mfc6_net_set(c, net); | ||
730 | return c; | 1012 | return c; |
731 | } | 1013 | } |
732 | 1014 | ||
733 | static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net) | 1015 | static struct mfc6_cache *ip6mr_cache_alloc_unres(void) |
734 | { | 1016 | { |
735 | struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); | 1017 | struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); |
736 | if (c == NULL) | 1018 | if (c == NULL) |
737 | return NULL; | 1019 | return NULL; |
738 | skb_queue_head_init(&c->mfc_un.unres.unresolved); | 1020 | skb_queue_head_init(&c->mfc_un.unres.unresolved); |
739 | c->mfc_un.unres.expires = jiffies + 10 * HZ; | 1021 | c->mfc_un.unres.expires = jiffies + 10 * HZ; |
740 | mfc6_net_set(c, net); | ||
741 | return c; | 1022 | return c; |
742 | } | 1023 | } |
743 | 1024 | ||
@@ -745,7 +1026,8 @@ static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net) | |||
745 | * A cache entry has gone into a resolved state from queued | 1026 | * A cache entry has gone into a resolved state from queued |
746 | */ | 1027 | */ |
747 | 1028 | ||
748 | static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) | 1029 | static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt, |
1030 | struct mfc6_cache *uc, struct mfc6_cache *c) | ||
749 | { | 1031 | { |
750 | struct sk_buff *skb; | 1032 | struct sk_buff *skb; |
751 | 1033 | ||
@@ -758,7 +1040,7 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) | |||
758 | int err; | 1040 | int err; |
759 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); | 1041 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); |
760 | 1042 | ||
761 | if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { | 1043 | if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { |
762 | nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; | 1044 | nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; |
763 | } else { | 1045 | } else { |
764 | nlh->nlmsg_type = NLMSG_ERROR; | 1046 | nlh->nlmsg_type = NLMSG_ERROR; |
@@ -766,9 +1048,9 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) | |||
766 | skb_trim(skb, nlh->nlmsg_len); | 1048 | skb_trim(skb, nlh->nlmsg_len); |
767 | ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE; | 1049 | ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE; |
768 | } | 1050 | } |
769 | err = rtnl_unicast(skb, mfc6_net(uc), NETLINK_CB(skb).pid); | 1051 | err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid); |
770 | } else | 1052 | } else |
771 | ip6_mr_forward(skb, c); | 1053 | ip6_mr_forward(net, mrt, skb, c); |
772 | } | 1054 | } |
773 | } | 1055 | } |
774 | 1056 | ||
@@ -779,8 +1061,8 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c) | |||
779 | * Called under mrt_lock. | 1061 | * Called under mrt_lock. |
780 | */ | 1062 | */ |
781 | 1063 | ||
782 | static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | 1064 | static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, |
783 | int assert) | 1065 | mifi_t mifi, int assert) |
784 | { | 1066 | { |
785 | struct sk_buff *skb; | 1067 | struct sk_buff *skb; |
786 | struct mrt6msg *msg; | 1068 | struct mrt6msg *msg; |
@@ -816,7 +1098,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | |||
816 | msg = (struct mrt6msg *)skb_transport_header(skb); | 1098 | msg = (struct mrt6msg *)skb_transport_header(skb); |
817 | msg->im6_mbz = 0; | 1099 | msg->im6_mbz = 0; |
818 | msg->im6_msgtype = MRT6MSG_WHOLEPKT; | 1100 | msg->im6_msgtype = MRT6MSG_WHOLEPKT; |
819 | msg->im6_mif = net->ipv6.mroute_reg_vif_num; | 1101 | msg->im6_mif = mrt->mroute_reg_vif_num; |
820 | msg->im6_pad = 0; | 1102 | msg->im6_pad = 0; |
821 | ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); | 1103 | ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); |
822 | ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); | 1104 | ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); |
@@ -851,7 +1133,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | |||
851 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1133 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
852 | } | 1134 | } |
853 | 1135 | ||
854 | if (net->ipv6.mroute6_sk == NULL) { | 1136 | if (mrt->mroute6_sk == NULL) { |
855 | kfree_skb(skb); | 1137 | kfree_skb(skb); |
856 | return -EINVAL; | 1138 | return -EINVAL; |
857 | } | 1139 | } |
@@ -859,7 +1141,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | |||
859 | /* | 1141 | /* |
860 | * Deliver to user space multicast routing algorithms | 1142 | * Deliver to user space multicast routing algorithms |
861 | */ | 1143 | */ |
862 | ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb); | 1144 | ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb); |
863 | if (ret < 0) { | 1145 | if (ret < 0) { |
864 | if (net_ratelimit()) | 1146 | if (net_ratelimit()) |
865 | printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n"); | 1147 | printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n"); |
@@ -874,26 +1156,28 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | |||
874 | */ | 1156 | */ |
875 | 1157 | ||
876 | static int | 1158 | static int |
877 | ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | 1159 | ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb) |
878 | { | 1160 | { |
1161 | bool found = false; | ||
879 | int err; | 1162 | int err; |
880 | struct mfc6_cache *c; | 1163 | struct mfc6_cache *c; |
881 | 1164 | ||
882 | spin_lock_bh(&mfc_unres_lock); | 1165 | spin_lock_bh(&mfc_unres_lock); |
883 | for (c = mfc_unres_queue; c; c = c->next) { | 1166 | list_for_each_entry(c, &mrt->mfc6_unres_queue, list) { |
884 | if (net_eq(mfc6_net(c), net) && | 1167 | if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && |
885 | ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && | 1168 | ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) { |
886 | ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) | 1169 | found = true; |
887 | break; | 1170 | break; |
1171 | } | ||
888 | } | 1172 | } |
889 | 1173 | ||
890 | if (c == NULL) { | 1174 | if (!found) { |
891 | /* | 1175 | /* |
892 | * Create a new entry if allowable | 1176 | * Create a new entry if allowable |
893 | */ | 1177 | */ |
894 | 1178 | ||
895 | if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 || | 1179 | if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || |
896 | (c = ip6mr_cache_alloc_unres(net)) == NULL) { | 1180 | (c = ip6mr_cache_alloc_unres()) == NULL) { |
897 | spin_unlock_bh(&mfc_unres_lock); | 1181 | spin_unlock_bh(&mfc_unres_lock); |
898 | 1182 | ||
899 | kfree_skb(skb); | 1183 | kfree_skb(skb); |
@@ -910,7 +1194,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | |||
910 | /* | 1194 | /* |
911 | * Reflect first query at pim6sd | 1195 | * Reflect first query at pim6sd |
912 | */ | 1196 | */ |
913 | err = ip6mr_cache_report(net, skb, mifi, MRT6MSG_NOCACHE); | 1197 | err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE); |
914 | if (err < 0) { | 1198 | if (err < 0) { |
915 | /* If the report failed throw the cache entry | 1199 | /* If the report failed throw the cache entry |
916 | out - Brad Parker | 1200 | out - Brad Parker |
@@ -922,11 +1206,10 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | |||
922 | return err; | 1206 | return err; |
923 | } | 1207 | } |
924 | 1208 | ||
925 | atomic_inc(&net->ipv6.cache_resolve_queue_len); | 1209 | atomic_inc(&mrt->cache_resolve_queue_len); |
926 | c->next = mfc_unres_queue; | 1210 | list_add(&c->list, &mrt->mfc6_unres_queue); |
927 | mfc_unres_queue = c; | ||
928 | 1211 | ||
929 | ipmr_do_expire_process(1); | 1212 | ipmr_do_expire_process(mrt); |
930 | } | 1213 | } |
931 | 1214 | ||
932 | /* | 1215 | /* |
@@ -948,19 +1231,18 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | |||
948 | * MFC6 cache manipulation by user space | 1231 | * MFC6 cache manipulation by user space |
949 | */ | 1232 | */ |
950 | 1233 | ||
951 | static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc) | 1234 | static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc) |
952 | { | 1235 | { |
953 | int line; | 1236 | int line; |
954 | struct mfc6_cache *c, **cp; | 1237 | struct mfc6_cache *c, *next; |
955 | 1238 | ||
956 | line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); | 1239 | line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); |
957 | 1240 | ||
958 | for (cp = &net->ipv6.mfc6_cache_array[line]; | 1241 | list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) { |
959 | (c = *cp) != NULL; cp = &c->next) { | ||
960 | if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && | 1242 | if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && |
961 | ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { | 1243 | ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { |
962 | write_lock_bh(&mrt_lock); | 1244 | write_lock_bh(&mrt_lock); |
963 | *cp = c->next; | 1245 | list_del(&c->list); |
964 | write_unlock_bh(&mrt_lock); | 1246 | write_unlock_bh(&mrt_lock); |
965 | 1247 | ||
966 | ip6mr_cache_free(c); | 1248 | ip6mr_cache_free(c); |
@@ -975,6 +1257,7 @@ static int ip6mr_device_event(struct notifier_block *this, | |||
975 | { | 1257 | { |
976 | struct net_device *dev = ptr; | 1258 | struct net_device *dev = ptr; |
977 | struct net *net = dev_net(dev); | 1259 | struct net *net = dev_net(dev); |
1260 | struct mr6_table *mrt; | ||
978 | struct mif_device *v; | 1261 | struct mif_device *v; |
979 | int ct; | 1262 | int ct; |
980 | LIST_HEAD(list); | 1263 | LIST_HEAD(list); |
@@ -982,10 +1265,12 @@ static int ip6mr_device_event(struct notifier_block *this, | |||
982 | if (event != NETDEV_UNREGISTER) | 1265 | if (event != NETDEV_UNREGISTER) |
983 | return NOTIFY_DONE; | 1266 | return NOTIFY_DONE; |
984 | 1267 | ||
985 | v = &net->ipv6.vif6_table[0]; | 1268 | ip6mr_for_each_table(mrt, net) { |
986 | for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) { | 1269 | v = &mrt->vif6_table[0]; |
987 | if (v->dev == dev) | 1270 | for (ct = 0; ct < mrt->maxvif; ct++, v++) { |
988 | mif6_delete(net, ct, &list); | 1271 | if (v->dev == dev) |
1272 | mif6_delete(mrt, ct, &list); | ||
1273 | } | ||
989 | } | 1274 | } |
990 | unregister_netdevice_many(&list); | 1275 | unregister_netdevice_many(&list); |
991 | 1276 | ||
@@ -1002,26 +1287,11 @@ static struct notifier_block ip6_mr_notifier = { | |||
1002 | 1287 | ||
1003 | static int __net_init ip6mr_net_init(struct net *net) | 1288 | static int __net_init ip6mr_net_init(struct net *net) |
1004 | { | 1289 | { |
1005 | int err = 0; | 1290 | int err; |
1006 | net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device), | ||
1007 | GFP_KERNEL); | ||
1008 | if (!net->ipv6.vif6_table) { | ||
1009 | err = -ENOMEM; | ||
1010 | goto fail; | ||
1011 | } | ||
1012 | |||
1013 | /* Forwarding cache */ | ||
1014 | net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES, | ||
1015 | sizeof(struct mfc6_cache *), | ||
1016 | GFP_KERNEL); | ||
1017 | if (!net->ipv6.mfc6_cache_array) { | ||
1018 | err = -ENOMEM; | ||
1019 | goto fail_mfc6_cache; | ||
1020 | } | ||
1021 | 1291 | ||
1022 | #ifdef CONFIG_IPV6_PIMSM_V2 | 1292 | err = ip6mr_rules_init(net); |
1023 | net->ipv6.mroute_reg_vif_num = -1; | 1293 | if (err < 0) |
1024 | #endif | 1294 | goto fail; |
1025 | 1295 | ||
1026 | #ifdef CONFIG_PROC_FS | 1296 | #ifdef CONFIG_PROC_FS |
1027 | err = -ENOMEM; | 1297 | err = -ENOMEM; |
@@ -1030,16 +1300,15 @@ static int __net_init ip6mr_net_init(struct net *net) | |||
1030 | if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops)) | 1300 | if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops)) |
1031 | goto proc_cache_fail; | 1301 | goto proc_cache_fail; |
1032 | #endif | 1302 | #endif |
1303 | |||
1033 | return 0; | 1304 | return 0; |
1034 | 1305 | ||
1035 | #ifdef CONFIG_PROC_FS | 1306 | #ifdef CONFIG_PROC_FS |
1036 | proc_cache_fail: | 1307 | proc_cache_fail: |
1037 | proc_net_remove(net, "ip6_mr_vif"); | 1308 | proc_net_remove(net, "ip6_mr_vif"); |
1038 | proc_vif_fail: | 1309 | proc_vif_fail: |
1039 | kfree(net->ipv6.mfc6_cache_array); | 1310 | ip6mr_rules_exit(net); |
1040 | #endif | 1311 | #endif |
1041 | fail_mfc6_cache: | ||
1042 | kfree(net->ipv6.vif6_table); | ||
1043 | fail: | 1312 | fail: |
1044 | return err; | 1313 | return err; |
1045 | } | 1314 | } |
@@ -1050,9 +1319,7 @@ static void __net_exit ip6mr_net_exit(struct net *net) | |||
1050 | proc_net_remove(net, "ip6_mr_cache"); | 1319 | proc_net_remove(net, "ip6_mr_cache"); |
1051 | proc_net_remove(net, "ip6_mr_vif"); | 1320 | proc_net_remove(net, "ip6_mr_vif"); |
1052 | #endif | 1321 | #endif |
1053 | mroute_clean_tables(net); | 1322 | ip6mr_rules_exit(net); |
1054 | kfree(net->ipv6.mfc6_cache_array); | ||
1055 | kfree(net->ipv6.vif6_table); | ||
1056 | } | 1323 | } |
1057 | 1324 | ||
1058 | static struct pernet_operations ip6mr_net_ops = { | 1325 | static struct pernet_operations ip6mr_net_ops = { |
@@ -1075,7 +1342,6 @@ int __init ip6_mr_init(void) | |||
1075 | if (err) | 1342 | if (err) |
1076 | goto reg_pernet_fail; | 1343 | goto reg_pernet_fail; |
1077 | 1344 | ||
1078 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); | ||
1079 | err = register_netdevice_notifier(&ip6_mr_notifier); | 1345 | err = register_netdevice_notifier(&ip6_mr_notifier); |
1080 | if (err) | 1346 | if (err) |
1081 | goto reg_notif_fail; | 1347 | goto reg_notif_fail; |
@@ -1086,13 +1352,13 @@ int __init ip6_mr_init(void) | |||
1086 | goto add_proto_fail; | 1352 | goto add_proto_fail; |
1087 | } | 1353 | } |
1088 | #endif | 1354 | #endif |
1355 | rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute); | ||
1089 | return 0; | 1356 | return 0; |
1090 | #ifdef CONFIG_IPV6_PIMSM_V2 | 1357 | #ifdef CONFIG_IPV6_PIMSM_V2 |
1091 | add_proto_fail: | 1358 | add_proto_fail: |
1092 | unregister_netdevice_notifier(&ip6_mr_notifier); | 1359 | unregister_netdevice_notifier(&ip6_mr_notifier); |
1093 | #endif | 1360 | #endif |
1094 | reg_notif_fail: | 1361 | reg_notif_fail: |
1095 | del_timer(&ipmr_expire_timer); | ||
1096 | unregister_pernet_subsys(&ip6mr_net_ops); | 1362 | unregister_pernet_subsys(&ip6mr_net_ops); |
1097 | reg_pernet_fail: | 1363 | reg_pernet_fail: |
1098 | kmem_cache_destroy(mrt_cachep); | 1364 | kmem_cache_destroy(mrt_cachep); |
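ip6_mr_init() also gains an rtnl_register() call for RTNL_FAMILY_IP6MR, so the MFC entries of every table can be dumped over rtnetlink instead of only through /proc/net/ip6_mr_cache. A minimal userspace dump request could look like the sketch below; parsing of the RTM_NEWROUTE replies is elided.

```c
/* Sketch: ask NETLINK_ROUTE for a dump of the IPv6 multicast routing cache,
 * which the rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, ...) above enables. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int request_ip6mr_dump(void)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct rtmsg	rtm;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type  = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family  = RTNL_FAMILY_IP6MR;

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	/* Caller now recv()s RTM_NEWROUTE messages until NLMSG_DONE. */
	return fd;
}
```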
@@ -1102,15 +1368,16 @@ reg_pernet_fail: | |||
1102 | void ip6_mr_cleanup(void) | 1368 | void ip6_mr_cleanup(void) |
1103 | { | 1369 | { |
1104 | unregister_netdevice_notifier(&ip6_mr_notifier); | 1370 | unregister_netdevice_notifier(&ip6_mr_notifier); |
1105 | del_timer(&ipmr_expire_timer); | ||
1106 | unregister_pernet_subsys(&ip6mr_net_ops); | 1371 | unregister_pernet_subsys(&ip6mr_net_ops); |
1107 | kmem_cache_destroy(mrt_cachep); | 1372 | kmem_cache_destroy(mrt_cachep); |
1108 | } | 1373 | } |
1109 | 1374 | ||
1110 | static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | 1375 | static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt, |
1376 | struct mf6cctl *mfc, int mrtsock) | ||
1111 | { | 1377 | { |
1378 | bool found = false; | ||
1112 | int line; | 1379 | int line; |
1113 | struct mfc6_cache *uc, *c, **cp; | 1380 | struct mfc6_cache *uc, *c; |
1114 | unsigned char ttls[MAXMIFS]; | 1381 | unsigned char ttls[MAXMIFS]; |
1115 | int i; | 1382 | int i; |
1116 | 1383 | ||
@@ -1126,17 +1393,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1126 | 1393 | ||
1127 | line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); | 1394 | line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr); |
1128 | 1395 | ||
1129 | for (cp = &net->ipv6.mfc6_cache_array[line]; | 1396 | list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) { |
1130 | (c = *cp) != NULL; cp = &c->next) { | ||
1131 | if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && | 1397 | if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) && |
1132 | ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) | 1398 | ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) { |
1399 | found = true; | ||
1133 | break; | 1400 | break; |
1401 | } | ||
1134 | } | 1402 | } |
1135 | 1403 | ||
1136 | if (c != NULL) { | 1404 | if (found) { |
1137 | write_lock_bh(&mrt_lock); | 1405 | write_lock_bh(&mrt_lock); |
1138 | c->mf6c_parent = mfc->mf6cc_parent; | 1406 | c->mf6c_parent = mfc->mf6cc_parent; |
1139 | ip6mr_update_thresholds(c, ttls); | 1407 | ip6mr_update_thresholds(mrt, c, ttls); |
1140 | if (!mrtsock) | 1408 | if (!mrtsock) |
1141 | c->mfc_flags |= MFC_STATIC; | 1409 | c->mfc_flags |= MFC_STATIC; |
1142 | write_unlock_bh(&mrt_lock); | 1410 | write_unlock_bh(&mrt_lock); |
@@ -1146,43 +1414,42 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1146 | if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr)) | 1414 | if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr)) |
1147 | return -EINVAL; | 1415 | return -EINVAL; |
1148 | 1416 | ||
1149 | c = ip6mr_cache_alloc(net); | 1417 | c = ip6mr_cache_alloc(); |
1150 | if (c == NULL) | 1418 | if (c == NULL) |
1151 | return -ENOMEM; | 1419 | return -ENOMEM; |
1152 | 1420 | ||
1153 | c->mf6c_origin = mfc->mf6cc_origin.sin6_addr; | 1421 | c->mf6c_origin = mfc->mf6cc_origin.sin6_addr; |
1154 | c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr; | 1422 | c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr; |
1155 | c->mf6c_parent = mfc->mf6cc_parent; | 1423 | c->mf6c_parent = mfc->mf6cc_parent; |
1156 | ip6mr_update_thresholds(c, ttls); | 1424 | ip6mr_update_thresholds(mrt, c, ttls); |
1157 | if (!mrtsock) | 1425 | if (!mrtsock) |
1158 | c->mfc_flags |= MFC_STATIC; | 1426 | c->mfc_flags |= MFC_STATIC; |
1159 | 1427 | ||
1160 | write_lock_bh(&mrt_lock); | 1428 | write_lock_bh(&mrt_lock); |
1161 | c->next = net->ipv6.mfc6_cache_array[line]; | 1429 | list_add(&c->list, &mrt->mfc6_cache_array[line]); |
1162 | net->ipv6.mfc6_cache_array[line] = c; | ||
1163 | write_unlock_bh(&mrt_lock); | 1430 | write_unlock_bh(&mrt_lock); |
1164 | 1431 | ||
1165 | /* | 1432 | /* |
1166 | * Check to see if we resolved a queued list. If so we | 1433 | * Check to see if we resolved a queued list. If so we |
1167 | * need to send on the frames and tidy up. | 1434 | * need to send on the frames and tidy up. |
1168 | */ | 1435 | */ |
1436 | found = false; | ||
1169 | spin_lock_bh(&mfc_unres_lock); | 1437 | spin_lock_bh(&mfc_unres_lock); |
1170 | for (cp = &mfc_unres_queue; (uc = *cp) != NULL; | 1438 | list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) { |
1171 | cp = &uc->next) { | 1439 | if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && |
1172 | if (net_eq(mfc6_net(uc), net) && | ||
1173 | ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && | ||
1174 | ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { | 1440 | ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { |
1175 | *cp = uc->next; | 1441 | list_del(&uc->list); |
1176 | atomic_dec(&net->ipv6.cache_resolve_queue_len); | 1442 | atomic_dec(&mrt->cache_resolve_queue_len); |
1443 | found = true; | ||
1177 | break; | 1444 | break; |
1178 | } | 1445 | } |
1179 | } | 1446 | } |
1180 | if (mfc_unres_queue == NULL) | 1447 | if (list_empty(&mrt->mfc6_unres_queue)) |
1181 | del_timer(&ipmr_expire_timer); | 1448 | del_timer(&mrt->ipmr_expire_timer); |
1182 | spin_unlock_bh(&mfc_unres_lock); | 1449 | spin_unlock_bh(&mfc_unres_lock); |
1183 | 1450 | ||
1184 | if (uc) { | 1451 | if (found) { |
1185 | ip6mr_cache_resolve(uc, c); | 1452 | ip6mr_cache_resolve(net, mrt, uc, c); |
1186 | ip6mr_cache_free(uc); | 1453 | ip6mr_cache_free(uc); |
1187 | } | 1454 | } |
1188 | return 0; | 1455 | return 0; |
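The rewrite of ip6mr_mfc_add() shows the pattern used throughout this series: the open-coded 'struct mfc6_cache **cp' / 'c->next' chains become struct list_head lists, and a 'found' flag replaces the old 'c != NULL' / 'if (uc)' tests, because after list_for_each_entry() runs to completion its cursor points at the list-head position rather than at a valid entry. A small kernel-style sketch of that pitfall, using an illustrative entry type rather than mfc6_cache:

#include <linux/list.h>
#include <linux/types.h>

struct sketch_entry {
        struct list_head        list;
        int                     key;
};

static struct sketch_entry *sketch_find(struct list_head *head, int key)
{
        struct sketch_entry *e;
        bool found = false;

        list_for_each_entry(e, head, list) {
                if (e->key == key) {
                        found = true;
                        break;
                }
        }
        /* Without the flag, 'e' would be dereferenced even when the loop
         * fell off the end of the list without matching anything. */
        return found ? e : NULL;
}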
@@ -1192,17 +1459,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1192 | * Close the multicast socket, and clear the vif tables etc | 1459 | * Close the multicast socket, and clear the vif tables etc |
1193 | */ | 1460 | */ |
1194 | 1461 | ||
1195 | static void mroute_clean_tables(struct net *net) | 1462 | static void mroute_clean_tables(struct mr6_table *mrt) |
1196 | { | 1463 | { |
1197 | int i; | 1464 | int i; |
1198 | LIST_HEAD(list); | 1465 | LIST_HEAD(list); |
1466 | struct mfc6_cache *c, *next; | ||
1199 | 1467 | ||
1200 | /* | 1468 | /* |
1201 | * Shut down all active vif entries | 1469 | * Shut down all active vif entries |
1202 | */ | 1470 | */ |
1203 | for (i = 0; i < net->ipv6.maxvif; i++) { | 1471 | for (i = 0; i < mrt->maxvif; i++) { |
1204 | if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC)) | 1472 | if (!(mrt->vif6_table[i].flags & VIFF_STATIC)) |
1205 | mif6_delete(net, i, &list); | 1473 | mif6_delete(mrt, i, &list); |
1206 | } | 1474 | } |
1207 | unregister_netdevice_many(&list); | 1475 | unregister_netdevice_many(&list); |
1208 | 1476 | ||
@@ -1210,48 +1478,36 @@ static void mroute_clean_tables(struct net *net) | |||
1210 | * Wipe the cache | 1478 | * Wipe the cache |
1211 | */ | 1479 | */ |
1212 | for (i = 0; i < MFC6_LINES; i++) { | 1480 | for (i = 0; i < MFC6_LINES; i++) { |
1213 | struct mfc6_cache *c, **cp; | 1481 | list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) { |
1214 | 1482 | if (c->mfc_flags & MFC_STATIC) | |
1215 | cp = &net->ipv6.mfc6_cache_array[i]; | ||
1216 | while ((c = *cp) != NULL) { | ||
1217 | if (c->mfc_flags & MFC_STATIC) { | ||
1218 | cp = &c->next; | ||
1219 | continue; | 1483 | continue; |
1220 | } | ||
1221 | write_lock_bh(&mrt_lock); | 1484 | write_lock_bh(&mrt_lock); |
1222 | *cp = c->next; | 1485 | list_del(&c->list); |
1223 | write_unlock_bh(&mrt_lock); | 1486 | write_unlock_bh(&mrt_lock); |
1224 | 1487 | ||
1225 | ip6mr_cache_free(c); | 1488 | ip6mr_cache_free(c); |
1226 | } | 1489 | } |
1227 | } | 1490 | } |
1228 | 1491 | ||
1229 | if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) { | 1492 | if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { |
1230 | struct mfc6_cache *c, **cp; | ||
1231 | |||
1232 | spin_lock_bh(&mfc_unres_lock); | 1493 | spin_lock_bh(&mfc_unres_lock); |
1233 | cp = &mfc_unres_queue; | 1494 | list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) { |
1234 | while ((c = *cp) != NULL) { | 1495 | list_del(&c->list); |
1235 | if (!net_eq(mfc6_net(c), net)) { | 1496 | ip6mr_destroy_unres(mrt, c); |
1236 | cp = &c->next; | ||
1237 | continue; | ||
1238 | } | ||
1239 | *cp = c->next; | ||
1240 | ip6mr_destroy_unres(c); | ||
1241 | } | 1497 | } |
1242 | spin_unlock_bh(&mfc_unres_lock); | 1498 | spin_unlock_bh(&mfc_unres_lock); |
1243 | } | 1499 | } |
1244 | } | 1500 | } |
1245 | 1501 | ||
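mroute_clean_tables() keeps its shape but now walks the lists with list_for_each_entry_safe(), which is required whenever entries are unlinked during the walk: the extra cursor caches the next node before the current one is deleted and freed. A sketch of the same pattern with an illustrative entry type:

#include <linux/list.h>
#include <linux/slab.h>

struct sketch_entry {
        struct list_head        list;
};

static void sketch_flush(struct list_head *head)
{
        struct sketch_entry *e, *next;

        /* 'next' is read before list_del()/kfree() touch 'e', so the
         * traversal survives removal of the current node. */
        list_for_each_entry_safe(e, next, head, list) {
                list_del(&e->list);
                kfree(e);
        }
}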
1246 | static int ip6mr_sk_init(struct sock *sk) | 1502 | static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk) |
1247 | { | 1503 | { |
1248 | int err = 0; | 1504 | int err = 0; |
1249 | struct net *net = sock_net(sk); | 1505 | struct net *net = sock_net(sk); |
1250 | 1506 | ||
1251 | rtnl_lock(); | 1507 | rtnl_lock(); |
1252 | write_lock_bh(&mrt_lock); | 1508 | write_lock_bh(&mrt_lock); |
1253 | if (likely(net->ipv6.mroute6_sk == NULL)) { | 1509 | if (likely(mrt->mroute6_sk == NULL)) { |
1254 | net->ipv6.mroute6_sk = sk; | 1510 | mrt->mroute6_sk = sk; |
1255 | net->ipv6.devconf_all->mc_forwarding++; | 1511 | net->ipv6.devconf_all->mc_forwarding++; |
1256 | } | 1512 | } |
1257 | else | 1513 | else |
@@ -1265,24 +1521,43 @@ static int ip6mr_sk_init(struct sock *sk) | |||
1265 | 1521 | ||
1266 | int ip6mr_sk_done(struct sock *sk) | 1522 | int ip6mr_sk_done(struct sock *sk) |
1267 | { | 1523 | { |
1268 | int err = 0; | 1524 | int err = -EACCES; |
1269 | struct net *net = sock_net(sk); | 1525 | struct net *net = sock_net(sk); |
1526 | struct mr6_table *mrt; | ||
1270 | 1527 | ||
1271 | rtnl_lock(); | 1528 | rtnl_lock(); |
1272 | if (sk == net->ipv6.mroute6_sk) { | 1529 | ip6mr_for_each_table(mrt, net) { |
1273 | write_lock_bh(&mrt_lock); | 1530 | if (sk == mrt->mroute6_sk) { |
1274 | net->ipv6.mroute6_sk = NULL; | 1531 | write_lock_bh(&mrt_lock); |
1275 | net->ipv6.devconf_all->mc_forwarding--; | 1532 | mrt->mroute6_sk = NULL; |
1276 | write_unlock_bh(&mrt_lock); | 1533 | net->ipv6.devconf_all->mc_forwarding--; |
1534 | write_unlock_bh(&mrt_lock); | ||
1277 | 1535 | ||
1278 | mroute_clean_tables(net); | 1536 | mroute_clean_tables(mrt); |
1279 | } else | 1537 | err = 0; |
1280 | err = -EACCES; | 1538 | break; |
1539 | } | ||
1540 | } | ||
1281 | rtnl_unlock(); | 1541 | rtnl_unlock(); |
1282 | 1542 | ||
1283 | return err; | 1543 | return err; |
1284 | } | 1544 | } |
1285 | 1545 | ||
1546 | struct sock *mroute6_socket(struct net *net, struct sk_buff *skb) | ||
1547 | { | ||
1548 | struct mr6_table *mrt; | ||
1549 | struct flowi fl = { | ||
1550 | .iif = skb->skb_iif, | ||
1551 | .oif = skb->dev->ifindex, | ||
1552 | .mark = skb->mark, | ||
1553 | }; | ||
1554 | |||
1555 | if (ip6mr_fib_lookup(net, &fl, &mrt) < 0) | ||
1556 | return NULL; | ||
1557 | |||
1558 | return mrt->mroute6_sk; | ||
1559 | } | ||
1560 | |||
1286 | /* | 1561 | /* |
1287 | * Socket options and virtual interface manipulation. The whole | 1562 | * Socket options and virtual interface manipulation. The whole |
1288 | * virtual interface system is a complete heap, but unfortunately | 1563 | * virtual interface system is a complete heap, but unfortunately |
@@ -1297,9 +1572,14 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1297 | struct mf6cctl mfc; | 1572 | struct mf6cctl mfc; |
1298 | mifi_t mifi; | 1573 | mifi_t mifi; |
1299 | struct net *net = sock_net(sk); | 1574 | struct net *net = sock_net(sk); |
1575 | struct mr6_table *mrt; | ||
1576 | |||
1577 | mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); | ||
1578 | if (mrt == NULL) | ||
1579 | return -ENOENT; | ||
1300 | 1580 | ||
1301 | if (optname != MRT6_INIT) { | 1581 | if (optname != MRT6_INIT) { |
1302 | if (sk != net->ipv6.mroute6_sk && !capable(CAP_NET_ADMIN)) | 1582 | if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN)) |
1303 | return -EACCES; | 1583 | return -EACCES; |
1304 | } | 1584 | } |
1305 | 1585 | ||
@@ -1311,7 +1591,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1311 | if (optlen < sizeof(int)) | 1591 | if (optlen < sizeof(int)) |
1312 | return -EINVAL; | 1592 | return -EINVAL; |
1313 | 1593 | ||
1314 | return ip6mr_sk_init(sk); | 1594 | return ip6mr_sk_init(mrt, sk); |
1315 | 1595 | ||
1316 | case MRT6_DONE: | 1596 | case MRT6_DONE: |
1317 | return ip6mr_sk_done(sk); | 1597 | return ip6mr_sk_done(sk); |
@@ -1324,7 +1604,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1324 | if (vif.mif6c_mifi >= MAXMIFS) | 1604 | if (vif.mif6c_mifi >= MAXMIFS) |
1325 | return -ENFILE; | 1605 | return -ENFILE; |
1326 | rtnl_lock(); | 1606 | rtnl_lock(); |
1327 | ret = mif6_add(net, &vif, sk == net->ipv6.mroute6_sk); | 1607 | ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk); |
1328 | rtnl_unlock(); | 1608 | rtnl_unlock(); |
1329 | return ret; | 1609 | return ret; |
1330 | 1610 | ||
@@ -1334,7 +1614,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1334 | if (copy_from_user(&mifi, optval, sizeof(mifi_t))) | 1614 | if (copy_from_user(&mifi, optval, sizeof(mifi_t))) |
1335 | return -EFAULT; | 1615 | return -EFAULT; |
1336 | rtnl_lock(); | 1616 | rtnl_lock(); |
1337 | ret = mif6_delete(net, mifi, NULL); | 1617 | ret = mif6_delete(mrt, mifi, NULL); |
1338 | rtnl_unlock(); | 1618 | rtnl_unlock(); |
1339 | return ret; | 1619 | return ret; |
1340 | 1620 | ||
@@ -1350,10 +1630,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1350 | return -EFAULT; | 1630 | return -EFAULT; |
1351 | rtnl_lock(); | 1631 | rtnl_lock(); |
1352 | if (optname == MRT6_DEL_MFC) | 1632 | if (optname == MRT6_DEL_MFC) |
1353 | ret = ip6mr_mfc_delete(net, &mfc); | 1633 | ret = ip6mr_mfc_delete(mrt, &mfc); |
1354 | else | 1634 | else |
1355 | ret = ip6mr_mfc_add(net, &mfc, | 1635 | ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk); |
1356 | sk == net->ipv6.mroute6_sk); | ||
1357 | rtnl_unlock(); | 1636 | rtnl_unlock(); |
1358 | return ret; | 1637 | return ret; |
1359 | 1638 | ||
@@ -1365,7 +1644,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1365 | int v; | 1644 | int v; |
1366 | if (get_user(v, (int __user *)optval)) | 1645 | if (get_user(v, (int __user *)optval)) |
1367 | return -EFAULT; | 1646 | return -EFAULT; |
1368 | net->ipv6.mroute_do_assert = !!v; | 1647 | mrt->mroute_do_assert = !!v; |
1369 | return 0; | 1648 | return 0; |
1370 | } | 1649 | } |
1371 | 1650 | ||
@@ -1378,15 +1657,36 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1378 | v = !!v; | 1657 | v = !!v; |
1379 | rtnl_lock(); | 1658 | rtnl_lock(); |
1380 | ret = 0; | 1659 | ret = 0; |
1381 | if (v != net->ipv6.mroute_do_pim) { | 1660 | if (v != mrt->mroute_do_pim) { |
1382 | net->ipv6.mroute_do_pim = v; | 1661 | mrt->mroute_do_pim = v; |
1383 | net->ipv6.mroute_do_assert = v; | 1662 | mrt->mroute_do_assert = v; |
1384 | } | 1663 | } |
1385 | rtnl_unlock(); | 1664 | rtnl_unlock(); |
1386 | return ret; | 1665 | return ret; |
1387 | } | 1666 | } |
1388 | 1667 | ||
1389 | #endif | 1668 | #endif |
1669 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | ||
1670 | case MRT6_TABLE: | ||
1671 | { | ||
1672 | u32 v; | ||
1673 | |||
1674 | if (optlen != sizeof(u32)) | ||
1675 | return -EINVAL; | ||
1676 | if (get_user(v, (u32 __user *)optval)) | ||
1677 | return -EFAULT; | ||
1678 | if (sk == mrt->mroute6_sk) | ||
1679 | return -EBUSY; | ||
1680 | |||
1681 | rtnl_lock(); | ||
1682 | ret = 0; | ||
1683 | if (!ip6mr_new_table(net, v)) | ||
1684 | ret = -ENOMEM; | ||
1685 | raw6_sk(sk)->ip6mr_table = v; | ||
1686 | rtnl_unlock(); | ||
1687 | return ret; | ||
1688 | } | ||
1689 | #endif | ||
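The new MRT6_TABLE option lets a routing daemon bind its control socket to a particular table before MRT6_INIT: it must be issued while the socket is not yet the table's mroute6_sk (hence the EBUSY check), it creates the table on demand, and it records the id in raw6_sk(sk)->ip6mr_table so later setsockopt/getsockopt/ioctl calls resolve against that table. A hedged userspace sketch; the option name and the raw-ICMPv6 socket requirement follow this series' uapi, error handling is trimmed, and CAP_NET_ADMIN is required:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>      /* MRT6_INIT, MRT6_TABLE, ... */

int main(void)
{
        int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
        unsigned int table = 100;       /* placeholder table id */
        int on = 1;

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* Select the table first: once MRT6_INIT has made this socket the
         * table's mroute6_sk, MRT6_TABLE fails with EBUSY. */
        if (setsockopt(fd, IPPROTO_IPV6, MRT6_TABLE, &table, sizeof(table)) < 0)
                perror("setsockopt(MRT6_TABLE)");
        if (setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on)) < 0)
                perror("setsockopt(MRT6_INIT)");

        /* ... MRT6_ADD_MIF / MRT6_ADD_MFC calls would follow here ... */

        close(fd);
        return 0;
}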
1390 | /* | 1690 | /* |
1391 | * Spurious command, or MRT6_VERSION which you cannot | 1691 | * Spurious command, or MRT6_VERSION which you cannot |
1392 | * set. | 1692 | * set. |
@@ -1406,6 +1706,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, | |||
1406 | int olr; | 1706 | int olr; |
1407 | int val; | 1707 | int val; |
1408 | struct net *net = sock_net(sk); | 1708 | struct net *net = sock_net(sk); |
1709 | struct mr6_table *mrt; | ||
1710 | |||
1711 | mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); | ||
1712 | if (mrt == NULL) | ||
1713 | return -ENOENT; | ||
1409 | 1714 | ||
1410 | switch (optname) { | 1715 | switch (optname) { |
1411 | case MRT6_VERSION: | 1716 | case MRT6_VERSION: |
@@ -1413,11 +1718,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, | |||
1413 | break; | 1718 | break; |
1414 | #ifdef CONFIG_IPV6_PIMSM_V2 | 1719 | #ifdef CONFIG_IPV6_PIMSM_V2 |
1415 | case MRT6_PIM: | 1720 | case MRT6_PIM: |
1416 | val = net->ipv6.mroute_do_pim; | 1721 | val = mrt->mroute_do_pim; |
1417 | break; | 1722 | break; |
1418 | #endif | 1723 | #endif |
1419 | case MRT6_ASSERT: | 1724 | case MRT6_ASSERT: |
1420 | val = net->ipv6.mroute_do_assert; | 1725 | val = mrt->mroute_do_assert; |
1421 | break; | 1726 | break; |
1422 | default: | 1727 | default: |
1423 | return -ENOPROTOOPT; | 1728 | return -ENOPROTOOPT; |
@@ -1448,16 +1753,21 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1448 | struct mif_device *vif; | 1753 | struct mif_device *vif; |
1449 | struct mfc6_cache *c; | 1754 | struct mfc6_cache *c; |
1450 | struct net *net = sock_net(sk); | 1755 | struct net *net = sock_net(sk); |
1756 | struct mr6_table *mrt; | ||
1757 | |||
1758 | mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT); | ||
1759 | if (mrt == NULL) | ||
1760 | return -ENOENT; | ||
1451 | 1761 | ||
1452 | switch (cmd) { | 1762 | switch (cmd) { |
1453 | case SIOCGETMIFCNT_IN6: | 1763 | case SIOCGETMIFCNT_IN6: |
1454 | if (copy_from_user(&vr, arg, sizeof(vr))) | 1764 | if (copy_from_user(&vr, arg, sizeof(vr))) |
1455 | return -EFAULT; | 1765 | return -EFAULT; |
1456 | if (vr.mifi >= net->ipv6.maxvif) | 1766 | if (vr.mifi >= mrt->maxvif) |
1457 | return -EINVAL; | 1767 | return -EINVAL; |
1458 | read_lock(&mrt_lock); | 1768 | read_lock(&mrt_lock); |
1459 | vif = &net->ipv6.vif6_table[vr.mifi]; | 1769 | vif = &mrt->vif6_table[vr.mifi]; |
1460 | if (MIF_EXISTS(net, vr.mifi)) { | 1770 | if (MIF_EXISTS(mrt, vr.mifi)) { |
1461 | vr.icount = vif->pkt_in; | 1771 | vr.icount = vif->pkt_in; |
1462 | vr.ocount = vif->pkt_out; | 1772 | vr.ocount = vif->pkt_out; |
1463 | vr.ibytes = vif->bytes_in; | 1773 | vr.ibytes = vif->bytes_in; |
@@ -1475,7 +1785,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1475 | return -EFAULT; | 1785 | return -EFAULT; |
1476 | 1786 | ||
1477 | read_lock(&mrt_lock); | 1787 | read_lock(&mrt_lock); |
1478 | c = ip6mr_cache_find(net, &sr.src.sin6_addr, &sr.grp.sin6_addr); | 1788 | c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); |
1479 | if (c) { | 1789 | if (c) { |
1480 | sr.pktcnt = c->mfc_un.res.pkt; | 1790 | sr.pktcnt = c->mfc_un.res.pkt; |
1481 | sr.bytecnt = c->mfc_un.res.bytes; | 1791 | sr.bytecnt = c->mfc_un.res.bytes; |
@@ -1505,11 +1815,11 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb) | |||
1505 | * Processing handlers for ip6mr_forward | 1815 | * Processing handlers for ip6mr_forward |
1506 | */ | 1816 | */ |
1507 | 1817 | ||
1508 | static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) | 1818 | static int ip6mr_forward2(struct net *net, struct mr6_table *mrt, |
1819 | struct sk_buff *skb, struct mfc6_cache *c, int vifi) | ||
1509 | { | 1820 | { |
1510 | struct ipv6hdr *ipv6h; | 1821 | struct ipv6hdr *ipv6h; |
1511 | struct net *net = mfc6_net(c); | 1822 | struct mif_device *vif = &mrt->vif6_table[vifi]; |
1512 | struct mif_device *vif = &net->ipv6.vif6_table[vifi]; | ||
1513 | struct net_device *dev; | 1823 | struct net_device *dev; |
1514 | struct dst_entry *dst; | 1824 | struct dst_entry *dst; |
1515 | struct flowi fl; | 1825 | struct flowi fl; |
@@ -1523,7 +1833,7 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) | |||
1523 | vif->bytes_out += skb->len; | 1833 | vif->bytes_out += skb->len; |
1524 | vif->dev->stats.tx_bytes += skb->len; | 1834 | vif->dev->stats.tx_bytes += skb->len; |
1525 | vif->dev->stats.tx_packets++; | 1835 | vif->dev->stats.tx_packets++; |
1526 | ip6mr_cache_report(net, skb, vifi, MRT6MSG_WHOLEPKT); | 1836 | ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT); |
1527 | goto out_free; | 1837 | goto out_free; |
1528 | } | 1838 | } |
1529 | #endif | 1839 | #endif |
@@ -1578,22 +1888,22 @@ out_free: | |||
1578 | return 0; | 1888 | return 0; |
1579 | } | 1889 | } |
1580 | 1890 | ||
1581 | static int ip6mr_find_vif(struct net_device *dev) | 1891 | static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev) |
1582 | { | 1892 | { |
1583 | struct net *net = dev_net(dev); | ||
1584 | int ct; | 1893 | int ct; |
1585 | for (ct = net->ipv6.maxvif - 1; ct >= 0; ct--) { | 1894 | |
1586 | if (net->ipv6.vif6_table[ct].dev == dev) | 1895 | for (ct = mrt->maxvif - 1; ct >= 0; ct--) { |
1896 | if (mrt->vif6_table[ct].dev == dev) | ||
1587 | break; | 1897 | break; |
1588 | } | 1898 | } |
1589 | return ct; | 1899 | return ct; |
1590 | } | 1900 | } |
1591 | 1901 | ||
1592 | static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) | 1902 | static int ip6_mr_forward(struct net *net, struct mr6_table *mrt, |
1903 | struct sk_buff *skb, struct mfc6_cache *cache) | ||
1593 | { | 1904 | { |
1594 | int psend = -1; | 1905 | int psend = -1; |
1595 | int vif, ct; | 1906 | int vif, ct; |
1596 | struct net *net = mfc6_net(cache); | ||
1597 | 1907 | ||
1598 | vif = cache->mf6c_parent; | 1908 | vif = cache->mf6c_parent; |
1599 | cache->mfc_un.res.pkt++; | 1909 | cache->mfc_un.res.pkt++; |
@@ -1602,30 +1912,30 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) | |||
1602 | /* | 1912 | /* |
1603 | * Wrong interface: drop packet and (maybe) send PIM assert. | 1913 | * Wrong interface: drop packet and (maybe) send PIM assert. |
1604 | */ | 1914 | */ |
1605 | if (net->ipv6.vif6_table[vif].dev != skb->dev) { | 1915 | if (mrt->vif6_table[vif].dev != skb->dev) { |
1606 | int true_vifi; | 1916 | int true_vifi; |
1607 | 1917 | ||
1608 | cache->mfc_un.res.wrong_if++; | 1918 | cache->mfc_un.res.wrong_if++; |
1609 | true_vifi = ip6mr_find_vif(skb->dev); | 1919 | true_vifi = ip6mr_find_vif(mrt, skb->dev); |
1610 | 1920 | ||
1611 | if (true_vifi >= 0 && net->ipv6.mroute_do_assert && | 1921 | if (true_vifi >= 0 && mrt->mroute_do_assert && |
1612 | /* pimsm uses asserts, when switching from RPT to SPT, | 1922 | /* pimsm uses asserts, when switching from RPT to SPT, |
1613 | so that we cannot check that packet arrived on an oif. | 1923 | so that we cannot check that packet arrived on an oif. |
1614 | It is bad, but otherwise we would need to move pretty | 1924 | It is bad, but otherwise we would need to move pretty |
1615 | large chunk of pimd to kernel. Ough... --ANK | 1925 | large chunk of pimd to kernel. Ough... --ANK |
1616 | */ | 1926 | */ |
1617 | (net->ipv6.mroute_do_pim || | 1927 | (mrt->mroute_do_pim || |
1618 | cache->mfc_un.res.ttls[true_vifi] < 255) && | 1928 | cache->mfc_un.res.ttls[true_vifi] < 255) && |
1619 | time_after(jiffies, | 1929 | time_after(jiffies, |
1620 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { | 1930 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { |
1621 | cache->mfc_un.res.last_assert = jiffies; | 1931 | cache->mfc_un.res.last_assert = jiffies; |
1622 | ip6mr_cache_report(net, skb, true_vifi, MRT6MSG_WRONGMIF); | 1932 | ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF); |
1623 | } | 1933 | } |
1624 | goto dont_forward; | 1934 | goto dont_forward; |
1625 | } | 1935 | } |
1626 | 1936 | ||
1627 | net->ipv6.vif6_table[vif].pkt_in++; | 1937 | mrt->vif6_table[vif].pkt_in++; |
1628 | net->ipv6.vif6_table[vif].bytes_in += skb->len; | 1938 | mrt->vif6_table[vif].bytes_in += skb->len; |
1629 | 1939 | ||
1630 | /* | 1940 | /* |
1631 | * Forward the frame | 1941 | * Forward the frame |
@@ -1635,13 +1945,13 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache) | |||
1635 | if (psend != -1) { | 1945 | if (psend != -1) { |
1636 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 1946 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
1637 | if (skb2) | 1947 | if (skb2) |
1638 | ip6mr_forward2(skb2, cache, psend); | 1948 | ip6mr_forward2(net, mrt, skb2, cache, psend); |
1639 | } | 1949 | } |
1640 | psend = ct; | 1950 | psend = ct; |
1641 | } | 1951 | } |
1642 | } | 1952 | } |
1643 | if (psend != -1) { | 1953 | if (psend != -1) { |
1644 | ip6mr_forward2(skb, cache, psend); | 1954 | ip6mr_forward2(net, mrt, skb, cache, psend); |
1645 | return 0; | 1955 | return 0; |
1646 | } | 1956 | } |
1647 | 1957 | ||
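ip6_mr_forward() keeps its replication strategy while gaining explicit net/mrt arguments: for every outgoing interface whose TTL threshold passes, the previously selected interface gets a clone and the final ip6mr_forward2() call consumes the original skb, so each interface receives exactly one copy and no clone is wasted on the last one. A condensed kernel-style sketch of that send-clone-then-original loop with an illustrative transmit callback:

#include <linux/skbuff.h>

static void sketch_replicate(struct sk_buff *skb, const int *targets, int n,
                             void (*xmit)(struct sk_buff *, int))
{
        int i, pending = -1;

        for (i = 0; i < n; i++) {
                /* Previous candidate goes out on a clone ... */
                if (pending != -1) {
                        struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);

                        if (copy)
                                xmit(copy, pending);
                }
                pending = targets[i];
        }
        /* ... and the last one consumes the original buffer. */
        if (pending != -1)
                xmit(skb, pending);
        else
                kfree_skb(skb);
}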
@@ -1659,9 +1969,19 @@ int ip6_mr_input(struct sk_buff *skb) | |||
1659 | { | 1969 | { |
1660 | struct mfc6_cache *cache; | 1970 | struct mfc6_cache *cache; |
1661 | struct net *net = dev_net(skb->dev); | 1971 | struct net *net = dev_net(skb->dev); |
1972 | struct mr6_table *mrt; | ||
1973 | struct flowi fl = { | ||
1974 | .iif = skb->dev->ifindex, | ||
1975 | .mark = skb->mark, | ||
1976 | }; | ||
1977 | int err; | ||
1978 | |||
1979 | err = ip6mr_fib_lookup(net, &fl, &mrt); | ||
1980 | if (err < 0) | ||
1981 | return err; | ||
1662 | 1982 | ||
1663 | read_lock(&mrt_lock); | 1983 | read_lock(&mrt_lock); |
1664 | cache = ip6mr_cache_find(net, | 1984 | cache = ip6mr_cache_find(mrt, |
1665 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); | 1985 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr); |
1666 | 1986 | ||
1667 | /* | 1987 | /* |
@@ -1670,9 +1990,9 @@ int ip6_mr_input(struct sk_buff *skb) | |||
1670 | if (cache == NULL) { | 1990 | if (cache == NULL) { |
1671 | int vif; | 1991 | int vif; |
1672 | 1992 | ||
1673 | vif = ip6mr_find_vif(skb->dev); | 1993 | vif = ip6mr_find_vif(mrt, skb->dev); |
1674 | if (vif >= 0) { | 1994 | if (vif >= 0) { |
1675 | int err = ip6mr_cache_unresolved(net, vif, skb); | 1995 | int err = ip6mr_cache_unresolved(mrt, vif, skb); |
1676 | read_unlock(&mrt_lock); | 1996 | read_unlock(&mrt_lock); |
1677 | 1997 | ||
1678 | return err; | 1998 | return err; |
@@ -1682,7 +2002,7 @@ int ip6_mr_input(struct sk_buff *skb) | |||
1682 | return -ENODEV; | 2002 | return -ENODEV; |
1683 | } | 2003 | } |
1684 | 2004 | ||
1685 | ip6_mr_forward(skb, cache); | 2005 | ip6_mr_forward(net, mrt, skb, cache); |
1686 | 2006 | ||
1687 | read_unlock(&mrt_lock); | 2007 | read_unlock(&mrt_lock); |
1688 | 2008 | ||
@@ -1690,12 +2010,11 @@ int ip6_mr_input(struct sk_buff *skb) | |||
1690 | } | 2010 | } |
1691 | 2011 | ||
1692 | 2012 | ||
1693 | static int | 2013 | static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, |
1694 | ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) | 2014 | struct mfc6_cache *c, struct rtmsg *rtm) |
1695 | { | 2015 | { |
1696 | int ct; | 2016 | int ct; |
1697 | struct rtnexthop *nhp; | 2017 | struct rtnexthop *nhp; |
1698 | struct net *net = mfc6_net(c); | ||
1699 | u8 *b = skb_tail_pointer(skb); | 2018 | u8 *b = skb_tail_pointer(skb); |
1700 | struct rtattr *mp_head; | 2019 | struct rtattr *mp_head; |
1701 | 2020 | ||
@@ -1703,19 +2022,19 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) | |||
1703 | if (c->mf6c_parent > MAXMIFS) | 2022 | if (c->mf6c_parent > MAXMIFS) |
1704 | return -ENOENT; | 2023 | return -ENOENT; |
1705 | 2024 | ||
1706 | if (MIF_EXISTS(net, c->mf6c_parent)) | 2025 | if (MIF_EXISTS(mrt, c->mf6c_parent)) |
1707 | RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex); | 2026 | RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex); |
1708 | 2027 | ||
1709 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); | 2028 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1710 | 2029 | ||
1711 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 2030 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1712 | if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { | 2031 | if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { |
1713 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 2032 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1714 | goto rtattr_failure; | 2033 | goto rtattr_failure; |
1715 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 2034 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
1716 | nhp->rtnh_flags = 0; | 2035 | nhp->rtnh_flags = 0; |
1717 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; | 2036 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; |
1718 | nhp->rtnh_ifindex = net->ipv6.vif6_table[ct].dev->ifindex; | 2037 | nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex; |
1719 | nhp->rtnh_len = sizeof(*nhp); | 2038 | nhp->rtnh_len = sizeof(*nhp); |
1720 | } | 2039 | } |
1721 | } | 2040 | } |
@@ -1733,11 +2052,16 @@ int ip6mr_get_route(struct net *net, | |||
1733 | struct sk_buff *skb, struct rtmsg *rtm, int nowait) | 2052 | struct sk_buff *skb, struct rtmsg *rtm, int nowait) |
1734 | { | 2053 | { |
1735 | int err; | 2054 | int err; |
2055 | struct mr6_table *mrt; | ||
1736 | struct mfc6_cache *cache; | 2056 | struct mfc6_cache *cache; |
1737 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); | 2057 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); |
1738 | 2058 | ||
2059 | mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); | ||
2060 | if (mrt == NULL) | ||
2061 | return -ENOENT; | ||
2062 | |||
1739 | read_lock(&mrt_lock); | 2063 | read_lock(&mrt_lock); |
1740 | cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); | 2064 | cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); |
1741 | 2065 | ||
1742 | if (!cache) { | 2066 | if (!cache) { |
1743 | struct sk_buff *skb2; | 2067 | struct sk_buff *skb2; |
@@ -1751,7 +2075,7 @@ int ip6mr_get_route(struct net *net, | |||
1751 | } | 2075 | } |
1752 | 2076 | ||
1753 | dev = skb->dev; | 2077 | dev = skb->dev; |
1754 | if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) { | 2078 | if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) { |
1755 | read_unlock(&mrt_lock); | 2079 | read_unlock(&mrt_lock); |
1756 | return -ENODEV; | 2080 | return -ENODEV; |
1757 | } | 2081 | } |
@@ -1780,7 +2104,7 @@ int ip6mr_get_route(struct net *net, | |||
1780 | ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr); | 2104 | ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr); |
1781 | ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr); | 2105 | ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr); |
1782 | 2106 | ||
1783 | err = ip6mr_cache_unresolved(net, vif, skb2); | 2107 | err = ip6mr_cache_unresolved(mrt, vif, skb2); |
1784 | read_unlock(&mrt_lock); | 2108 | read_unlock(&mrt_lock); |
1785 | 2109 | ||
1786 | return err; | 2110 | return err; |
@@ -1789,8 +2113,88 @@ int ip6mr_get_route(struct net *net, | |||
1789 | if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) | 2113 | if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) |
1790 | cache->mfc_flags |= MFC_NOTIFY; | 2114 | cache->mfc_flags |= MFC_NOTIFY; |
1791 | 2115 | ||
1792 | err = ip6mr_fill_mroute(skb, cache, rtm); | 2116 | err = __ip6mr_fill_mroute(mrt, skb, cache, rtm); |
1793 | read_unlock(&mrt_lock); | 2117 | read_unlock(&mrt_lock); |
1794 | return err; | 2118 | return err; |
1795 | } | 2119 | } |
1796 | 2120 | ||
2121 | static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, | ||
2122 | u32 pid, u32 seq, struct mfc6_cache *c) | ||
2123 | { | ||
2124 | struct nlmsghdr *nlh; | ||
2125 | struct rtmsg *rtm; | ||
2126 | |||
2127 | nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); | ||
2128 | if (nlh == NULL) | ||
2129 | return -EMSGSIZE; | ||
2130 | |||
2131 | rtm = nlmsg_data(nlh); | ||
2132 | rtm->rtm_family = RTNL_FAMILY_IPMR; | ||
2133 | rtm->rtm_dst_len = 128; | ||
2134 | rtm->rtm_src_len = 128; | ||
2135 | rtm->rtm_tos = 0; | ||
2136 | rtm->rtm_table = mrt->id; | ||
2137 | NLA_PUT_U32(skb, RTA_TABLE, mrt->id); | ||
2138 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; | ||
2139 | rtm->rtm_protocol = RTPROT_UNSPEC; | ||
2140 | rtm->rtm_flags = 0; | ||
2141 | |||
2142 | NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin); | ||
2143 | NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp); | ||
2144 | |||
2145 | if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0) | ||
2146 | goto nla_put_failure; | ||
2147 | |||
2148 | return nlmsg_end(skb, nlh); | ||
2149 | |||
2150 | nla_put_failure: | ||
2151 | nlmsg_cancel(skb, nlh); | ||
2152 | return -EMSGSIZE; | ||
2153 | } | ||
2154 | |||
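The netlink variant of ip6mr_fill_mroute() added here follows the standard rtnetlink fill sequence: nlmsg_put() reserves the header, the rtmsg fields and route attributes are filled in, __ip6mr_fill_mroute() appends the nexthop list, and nlmsg_end()/nlmsg_cancel() either commit the message or roll it back when the skb runs out of room. A minimal sketch of that put/end/cancel discipline with a single generic attribute (illustrative message layout, not this function):

#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

static int sketch_fill(struct sk_buff *skb, u32 pid, u32 seq, u32 table)
{
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;

        nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
        if (nlh == NULL)
                return -EMSGSIZE;

        rtm = nlmsg_data(nlh);
        memset(rtm, 0, sizeof(*rtm));
        rtm->rtm_family = AF_INET6;

        if (nla_put_u32(skb, RTA_TABLE, table))
                goto nla_put_failure;

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}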
2155 | static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) | ||
2156 | { | ||
2157 | struct net *net = sock_net(skb->sk); | ||
2158 | struct mr6_table *mrt; | ||
2159 | struct mfc6_cache *mfc; | ||
2160 | unsigned int t = 0, s_t; | ||
2161 | unsigned int h = 0, s_h; | ||
2162 | unsigned int e = 0, s_e; | ||
2163 | |||
2164 | s_t = cb->args[0]; | ||
2165 | s_h = cb->args[1]; | ||
2166 | s_e = cb->args[2]; | ||
2167 | |||
2168 | read_lock(&mrt_lock); | ||
2169 | ip6mr_for_each_table(mrt, net) { | ||
2170 | if (t < s_t) | ||
2171 | goto next_table; | ||
2172 | if (t > s_t) | ||
2173 | s_h = 0; | ||
2174 | for (h = s_h; h < MFC6_LINES; h++) { | ||
2175 | list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) { | ||
2176 | if (e < s_e) | ||
2177 | goto next_entry; | ||
2178 | if (ip6mr_fill_mroute(mrt, skb, | ||
2179 | NETLINK_CB(cb->skb).pid, | ||
2180 | cb->nlh->nlmsg_seq, | ||
2181 | mfc) < 0) | ||
2182 | goto done; | ||
2183 | next_entry: | ||
2184 | e++; | ||
2185 | } | ||
2186 | e = s_e = 0; | ||
2187 | } | ||
2188 | s_h = 0; | ||
2189 | next_table: | ||
2190 | t++; | ||
2191 | } | ||
2192 | done: | ||
2193 | read_unlock(&mrt_lock); | ||
2194 | |||
2195 | cb->args[2] = e; | ||
2196 | cb->args[1] = h; | ||
2197 | cb->args[0] = t; | ||
2198 | |||
2199 | return skb->len; | ||
2200 | } | ||
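ip6mr_rtm_dumproute() uses the usual multi-level netlink dump cursor: the position reached so far (table, hash bucket, entry) is parked in cb->args[], each pass skips everything below the saved position, and the dump stops as soon as a fill fails, so the next read resumes exactly at the entry that did not fit. Because list entries are not indexable, the kernel code counts them; the standalone sketch below shows the same skip-and-resume idea over a two-level structure with an illustrative cursor type:

#include <stdbool.h>
#include <stddef.h>

struct cursor {
        size_t group;
        size_t item;
};

/* "Emit" one value into a bounded buffer; failure models a full skb. */
static bool emit(int value, int *out, size_t *used, size_t cap)
{
        if (*used >= cap)
                return false;
        out[(*used)++] = value;
        return true;
}

/* Dump groups of 4 items, resuming from and updating 'args' so that a
 * later call carries on where the previous one had to stop. */
static size_t dump(int (*data)[4], size_t ngroups, struct cursor *args,
                   int *out, size_t cap)
{
        size_t used = 0, g, i = args->item;

        for (g = args->group; g < ngroups; g++) {
                size_t start = (g == args->group) ? args->item : 0;

                for (i = start; i < 4; i++) {
                        if (!emit(data[g][i], out, &used, cap))
                                goto done;
                }
        }
done:
        args->group = g;
        args->item = i;
        return used;
}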