diff options
author | Patrick McHardy <kaber@trash.net> | 2010-05-11 08:40:48 -0400 |
---|---|---|
committer | Patrick McHardy <kaber@trash.net> | 2010-05-11 08:40:48 -0400 |
commit | c476efbcde5ba58b81ac752f4a894d6db8e17d94 (patch) | |
tree | 69aea7351adf2ae537e4d6ec76e1cc0799a16593 /net | |
parent | d250fe91ae129bff0968e685cc9c466d3a5e3482 (diff) |
ipv6: ip6mr: move unres_queue and timer to per-namespace data
The unres_queue is currently shared between all namespaces. Following patches
will additionally allow the creation of multiple multicast routing tables in each
namespace. Having a single shared queue for all these users seems excessive;
move the queue and the cleanup timer to the per-namespace data to unshare it.
As a side-effect, this fixes a bug in the seq file iteration functions: the
first entry returned is always from the current namespace, entries returned
after that may belong to any namespace.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/ipv6/ip6mr.c | 74 |
1 files changed, 33 insertions, 41 deletions
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index e0b530ca394c..7236030e403e 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -63,8 +63,6 @@ static DEFINE_RWLOCK(mrt_lock); | |||
63 | 63 | ||
64 | #define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL) | 64 | #define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL) |
65 | 65 | ||
66 | static struct mfc6_cache *mfc_unres_queue; /* Queue of unresolved entries */ | ||
67 | |||
68 | /* Special spinlock for queue of unresolved entries */ | 66 | /* Special spinlock for queue of unresolved entries */ |
69 | static DEFINE_SPINLOCK(mfc_unres_lock); | 67 | static DEFINE_SPINLOCK(mfc_unres_lock); |
70 | 68 | ||
@@ -84,8 +82,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, | |||
84 | static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm); | 82 | static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm); |
85 | static void mroute_clean_tables(struct net *net); | 83 | static void mroute_clean_tables(struct net *net); |
86 | 84 | ||
87 | static struct timer_list ipmr_expire_timer; | ||
88 | |||
89 | 85 | ||
90 | #ifdef CONFIG_PROC_FS | 86 | #ifdef CONFIG_PROC_FS |
91 | 87 | ||
@@ -110,11 +106,10 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net, | |||
110 | return mfc; | 106 | return mfc; |
111 | read_unlock(&mrt_lock); | 107 | read_unlock(&mrt_lock); |
112 | 108 | ||
113 | it->cache = &mfc_unres_queue; | 109 | it->cache = &net->ipv6.mfc6_unres_queue; |
114 | spin_lock_bh(&mfc_unres_lock); | 110 | spin_lock_bh(&mfc_unres_lock); |
115 | for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) | 111 | for (mfc = net->ipv6.mfc6_unres_queue; mfc; mfc = mfc->next) |
116 | if (net_eq(mfc6_net(mfc), net) && | 112 | if (pos-- == 0) |
117 | pos-- == 0) | ||
118 | return mfc; | 113 | return mfc; |
119 | spin_unlock_bh(&mfc_unres_lock); | 114 | spin_unlock_bh(&mfc_unres_lock); |
120 | 115 | ||
@@ -244,7 +239,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
244 | if (mfc->next) | 239 | if (mfc->next) |
245 | return mfc->next; | 240 | return mfc->next; |
246 | 241 | ||
247 | if (it->cache == &mfc_unres_queue) | 242 | if (it->cache == &net->ipv6.mfc6_unres_queue) |
248 | goto end_of_list; | 243 | goto end_of_list; |
249 | 244 | ||
250 | BUG_ON(it->cache != net->ipv6.mfc6_cache_array); | 245 | BUG_ON(it->cache != net->ipv6.mfc6_cache_array); |
@@ -257,11 +252,11 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
257 | 252 | ||
258 | /* exhausted cache_array, show unresolved */ | 253 | /* exhausted cache_array, show unresolved */ |
259 | read_unlock(&mrt_lock); | 254 | read_unlock(&mrt_lock); |
260 | it->cache = &mfc_unres_queue; | 255 | it->cache = &net->ipv6.mfc6_unres_queue; |
261 | it->ct = 0; | 256 | it->ct = 0; |
262 | 257 | ||
263 | spin_lock_bh(&mfc_unres_lock); | 258 | spin_lock_bh(&mfc_unres_lock); |
264 | mfc = mfc_unres_queue; | 259 | mfc = net->ipv6.mfc6_unres_queue; |
265 | if (mfc) | 260 | if (mfc) |
266 | return mfc; | 261 | return mfc; |
267 | 262 | ||
@@ -277,7 +272,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) | |||
277 | struct ipmr_mfc_iter *it = seq->private; | 272 | struct ipmr_mfc_iter *it = seq->private; |
278 | struct net *net = seq_file_net(seq); | 273 | struct net *net = seq_file_net(seq); |
279 | 274 | ||
280 | if (it->cache == &mfc_unres_queue) | 275 | if (it->cache == &net->ipv6.mfc6_unres_queue) |
281 | spin_unlock_bh(&mfc_unres_lock); | 276 | spin_unlock_bh(&mfc_unres_lock); |
282 | else if (it->cache == net->ipv6.mfc6_cache_array) | 277 | else if (it->cache == net->ipv6.mfc6_cache_array) |
283 | read_unlock(&mrt_lock); | 278 | read_unlock(&mrt_lock); |
@@ -301,7 +296,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | |||
301 | &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, | 296 | &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, |
302 | mfc->mf6c_parent); | 297 | mfc->mf6c_parent); |
303 | 298 | ||
304 | if (it->cache != &mfc_unres_queue) { | 299 | if (it->cache != &net->ipv6.mfc6_unres_queue) { |
305 | seq_printf(seq, " %8lu %8lu %8lu", | 300 | seq_printf(seq, " %8lu %8lu %8lu", |
306 | mfc->mfc_un.res.pkt, | 301 | mfc->mfc_un.res.pkt, |
307 | mfc->mfc_un.res.bytes, | 302 | mfc->mfc_un.res.bytes, |
@@ -559,15 +554,15 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c) | |||
559 | } | 554 | } |
560 | 555 | ||
561 | 556 | ||
562 | /* Single timer process for all the unresolved queue. */ | 557 | /* Timer process for all the unresolved queue. */ |
563 | 558 | ||
564 | static void ipmr_do_expire_process(unsigned long dummy) | 559 | static void ipmr_do_expire_process(struct net *net) |
565 | { | 560 | { |
566 | unsigned long now = jiffies; | 561 | unsigned long now = jiffies; |
567 | unsigned long expires = 10 * HZ; | 562 | unsigned long expires = 10 * HZ; |
568 | struct mfc6_cache *c, **cp; | 563 | struct mfc6_cache *c, **cp; |
569 | 564 | ||
570 | cp = &mfc_unres_queue; | 565 | cp = &net->ipv6.mfc6_unres_queue; |
571 | 566 | ||
572 | while ((c = *cp) != NULL) { | 567 | while ((c = *cp) != NULL) { |
573 | if (time_after(c->mfc_un.unres.expires, now)) { | 568 | if (time_after(c->mfc_un.unres.expires, now)) { |
@@ -583,19 +578,21 @@ static void ipmr_do_expire_process(unsigned long dummy) | |||
583 | ip6mr_destroy_unres(c); | 578 | ip6mr_destroy_unres(c); |
584 | } | 579 | } |
585 | 580 | ||
586 | if (mfc_unres_queue != NULL) | 581 | if (net->ipv6.mfc6_unres_queue != NULL) |
587 | mod_timer(&ipmr_expire_timer, jiffies + expires); | 582 | mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + expires); |
588 | } | 583 | } |
589 | 584 | ||
590 | static void ipmr_expire_process(unsigned long dummy) | 585 | static void ipmr_expire_process(unsigned long arg) |
591 | { | 586 | { |
587 | struct net *net = (struct net *)arg; | ||
588 | |||
592 | if (!spin_trylock(&mfc_unres_lock)) { | 589 | if (!spin_trylock(&mfc_unres_lock)) { |
593 | mod_timer(&ipmr_expire_timer, jiffies + 1); | 590 | mod_timer(&net->ipv6.ipmr_expire_timer, jiffies + 1); |
594 | return; | 591 | return; |
595 | } | 592 | } |
596 | 593 | ||
597 | if (mfc_unres_queue != NULL) | 594 | if (net->ipv6.mfc6_unres_queue != NULL) |
598 | ipmr_do_expire_process(dummy); | 595 | ipmr_do_expire_process(net); |
599 | 596 | ||
600 | spin_unlock(&mfc_unres_lock); | 597 | spin_unlock(&mfc_unres_lock); |
601 | } | 598 | } |
@@ -880,9 +877,8 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | |||
880 | struct mfc6_cache *c; | 877 | struct mfc6_cache *c; |
881 | 878 | ||
882 | spin_lock_bh(&mfc_unres_lock); | 879 | spin_lock_bh(&mfc_unres_lock); |
883 | for (c = mfc_unres_queue; c; c = c->next) { | 880 | for (c = net->ipv6.mfc6_unres_queue; c; c = c->next) { |
884 | if (net_eq(mfc6_net(c), net) && | 881 | if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && |
885 | ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && | ||
886 | ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) | 882 | ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) |
887 | break; | 883 | break; |
888 | } | 884 | } |
@@ -923,10 +919,10 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb) | |||
923 | } | 919 | } |
924 | 920 | ||
925 | atomic_inc(&net->ipv6.cache_resolve_queue_len); | 921 | atomic_inc(&net->ipv6.cache_resolve_queue_len); |
926 | c->next = mfc_unres_queue; | 922 | c->next = net->ipv6.mfc6_unres_queue; |
927 | mfc_unres_queue = c; | 923 | net->ipv6.mfc6_unres_queue = c; |
928 | 924 | ||
929 | ipmr_do_expire_process(1); | 925 | ipmr_do_expire_process(net); |
930 | } | 926 | } |
931 | 927 | ||
932 | /* | 928 | /* |
@@ -1019,6 +1015,9 @@ static int __net_init ip6mr_net_init(struct net *net) | |||
1019 | goto fail_mfc6_cache; | 1015 | goto fail_mfc6_cache; |
1020 | } | 1016 | } |
1021 | 1017 | ||
1018 | setup_timer(&net->ipv6.ipmr_expire_timer, ipmr_expire_process, | ||
1019 | (unsigned long)net); | ||
1020 | |||
1022 | #ifdef CONFIG_IPV6_PIMSM_V2 | 1021 | #ifdef CONFIG_IPV6_PIMSM_V2 |
1023 | net->ipv6.mroute_reg_vif_num = -1; | 1022 | net->ipv6.mroute_reg_vif_num = -1; |
1024 | #endif | 1023 | #endif |
@@ -1050,6 +1049,7 @@ static void __net_exit ip6mr_net_exit(struct net *net) | |||
1050 | proc_net_remove(net, "ip6_mr_cache"); | 1049 | proc_net_remove(net, "ip6_mr_cache"); |
1051 | proc_net_remove(net, "ip6_mr_vif"); | 1050 | proc_net_remove(net, "ip6_mr_vif"); |
1052 | #endif | 1051 | #endif |
1052 | del_timer(&net->ipv6.ipmr_expire_timer); | ||
1053 | mroute_clean_tables(net); | 1053 | mroute_clean_tables(net); |
1054 | kfree(net->ipv6.mfc6_cache_array); | 1054 | kfree(net->ipv6.mfc6_cache_array); |
1055 | kfree(net->ipv6.vif6_table); | 1055 | kfree(net->ipv6.vif6_table); |
@@ -1075,7 +1075,6 @@ int __init ip6_mr_init(void) | |||
1075 | if (err) | 1075 | if (err) |
1076 | goto reg_pernet_fail; | 1076 | goto reg_pernet_fail; |
1077 | 1077 | ||
1078 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); | ||
1079 | err = register_netdevice_notifier(&ip6_mr_notifier); | 1078 | err = register_netdevice_notifier(&ip6_mr_notifier); |
1080 | if (err) | 1079 | if (err) |
1081 | goto reg_notif_fail; | 1080 | goto reg_notif_fail; |
@@ -1092,7 +1091,6 @@ add_proto_fail: | |||
1092 | unregister_netdevice_notifier(&ip6_mr_notifier); | 1091 | unregister_netdevice_notifier(&ip6_mr_notifier); |
1093 | #endif | 1092 | #endif |
1094 | reg_notif_fail: | 1093 | reg_notif_fail: |
1095 | del_timer(&ipmr_expire_timer); | ||
1096 | unregister_pernet_subsys(&ip6mr_net_ops); | 1094 | unregister_pernet_subsys(&ip6mr_net_ops); |
1097 | reg_pernet_fail: | 1095 | reg_pernet_fail: |
1098 | kmem_cache_destroy(mrt_cachep); | 1096 | kmem_cache_destroy(mrt_cachep); |
@@ -1102,7 +1100,6 @@ reg_pernet_fail: | |||
1102 | void ip6_mr_cleanup(void) | 1100 | void ip6_mr_cleanup(void) |
1103 | { | 1101 | { |
1104 | unregister_netdevice_notifier(&ip6_mr_notifier); | 1102 | unregister_netdevice_notifier(&ip6_mr_notifier); |
1105 | del_timer(&ipmr_expire_timer); | ||
1106 | unregister_pernet_subsys(&ip6mr_net_ops); | 1103 | unregister_pernet_subsys(&ip6mr_net_ops); |
1107 | kmem_cache_destroy(mrt_cachep); | 1104 | kmem_cache_destroy(mrt_cachep); |
1108 | } | 1105 | } |
@@ -1167,18 +1164,17 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1167 | * need to send on the frames and tidy up. | 1164 | * need to send on the frames and tidy up. |
1168 | */ | 1165 | */ |
1169 | spin_lock_bh(&mfc_unres_lock); | 1166 | spin_lock_bh(&mfc_unres_lock); |
1170 | for (cp = &mfc_unres_queue; (uc = *cp) != NULL; | 1167 | for (cp = &net->ipv6.mfc6_unres_queue; (uc = *cp) != NULL; |
1171 | cp = &uc->next) { | 1168 | cp = &uc->next) { |
1172 | if (net_eq(mfc6_net(uc), net) && | 1169 | if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && |
1173 | ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && | ||
1174 | ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { | 1170 | ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { |
1175 | *cp = uc->next; | 1171 | *cp = uc->next; |
1176 | atomic_dec(&net->ipv6.cache_resolve_queue_len); | 1172 | atomic_dec(&net->ipv6.cache_resolve_queue_len); |
1177 | break; | 1173 | break; |
1178 | } | 1174 | } |
1179 | } | 1175 | } |
1180 | if (mfc_unres_queue == NULL) | 1176 | if (net->ipv6.mfc6_unres_queue == NULL) |
1181 | del_timer(&ipmr_expire_timer); | 1177 | del_timer(&net->ipv6.ipmr_expire_timer); |
1182 | spin_unlock_bh(&mfc_unres_lock); | 1178 | spin_unlock_bh(&mfc_unres_lock); |
1183 | 1179 | ||
1184 | if (uc) { | 1180 | if (uc) { |
@@ -1230,12 +1226,8 @@ static void mroute_clean_tables(struct net *net) | |||
1230 | struct mfc6_cache *c, **cp; | 1226 | struct mfc6_cache *c, **cp; |
1231 | 1227 | ||
1232 | spin_lock_bh(&mfc_unres_lock); | 1228 | spin_lock_bh(&mfc_unres_lock); |
1233 | cp = &mfc_unres_queue; | 1229 | cp = &net->ipv6.mfc6_unres_queue; |
1234 | while ((c = *cp) != NULL) { | 1230 | while ((c = *cp) != NULL) { |
1235 | if (!net_eq(mfc6_net(c), net)) { | ||
1236 | cp = &c->next; | ||
1237 | continue; | ||
1238 | } | ||
1239 | *cp = c->next; | 1231 | *cp = c->next; |
1240 | ip6mr_destroy_unres(c); | 1232 | ip6mr_destroy_unres(c); |
1241 | } | 1233 | } |