author		Benjamin Thery <benjamin.thery@bull.net>	2008-12-10 19:27:21 -0500
committer	David S. Miller <davem@davemloft.net>	2008-12-10 19:27:21 -0500
commit		4045e57c19bee150370390545ee8a933b3f7a18d
tree		a4744efeed6819eff5acd9c13ca0fafe71df144d /net/ipv6/ip6mr.c
parent		4a6258a0e33d042e4c84d9dec25d45ddb40a70b3
netns: ip6mr: declare counter cache_resolve_queue_len per-namespace
Preliminary work to make IPv6 multicast forwarding netns-aware.
Declare the counter cache_resolve_queue_len per-namespace: move it into
struct netns_ipv6.
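
For reference, the counter lands in struct netns_ipv6 roughly as follows
(a sketch only; the header hunk is outside this ip6mr.c-limited diffstat,
and the surrounding members are elided):

	/* include/net/netns/ipv6.h -- sketch, other members elided */
	struct netns_ipv6 {
		/* ... sysctl, route and fragment state ... */
		atomic_t	cache_resolve_queue_len; /* unresolved mfc6 entries */
	};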
This variable counts the number of unresolved cache entries queued in the
list mfc_unres_queue. The list itself is kept global to all netns, as the
number of entries per namespace is limited to 10 (hardcoded in
ip6mr_cache_unresolved()).
Entries belonging to different namespaces in mfc_unres_queue are
identified by matching the mfc_net member introduced previously in
struct mfc6_cache.
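
The per-entry namespace is read through the mfc6_net() accessor added by
the parent commit; a minimal sketch of that plumbing, with the
CONFIG_NET_NS conditionals and exact field placement elided:

	/* sketch: what the net_eq(mfc6_net(c), &init_net) checks resolve to */
	static inline struct net *mfc6_net(const struct mfc6_cache *mfc)
	{
		return mfc->mfc_net;	/* stored when the cache entry is allocated */
	}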
Keeping this list global to all netns also allows us to keep a single
timer (ipmr_expire_timer) to handle the expiration of all entries.
In some places the cache_resolve_queue_len value was tested to decide
whether to arm or delete the timer. The global counter is non-zero exactly
when mfc_unres_queue is non-empty, so these tests are equivalent to
testing mfc_unres_queue itself; this patch replaces them accordingly.
At the moment, cache_resolve_queue_len is only referenced in init_net.
Signed-off-by: Benjamin Thery <benjamin.thery@bull.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/ip6mr.c')
-rw-r--r--	net/ipv6/ip6mr.c	40
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 287e526ba036..077c8198eb53 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -69,7 +69,6 @@ static int mroute_do_pim;
 #endif
 
 static struct mfc6_cache *mfc_unres_queue;	/* Queue of unresolved entries */
-static atomic_t cache_resolve_queue_len;	/* Size of unresolved */
 
 /* Special spinlock for queue of unresolved entries */
 static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -519,7 +518,7 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c)
 {
 	struct sk_buff *skb;
 
-	atomic_dec(&cache_resolve_queue_len);
+	atomic_dec(&init_net.ipv6.cache_resolve_queue_len);
 
 	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
 		if (ipv6_hdr(skb)->version == 0) {
@@ -561,7 +560,7 @@ static void ipmr_do_expire_process(unsigned long dummy)
 		ip6mr_destroy_unres(c);
 	}
 
-	if (atomic_read(&cache_resolve_queue_len))
+	if (mfc_unres_queue != NULL)
 		mod_timer(&ipmr_expire_timer, jiffies + expires);
 }
 
@@ -572,7 +571,7 @@ static void ipmr_expire_process(unsigned long dummy)
 		return;
 	}
 
-	if (atomic_read(&cache_resolve_queue_len))
+	if (mfc_unres_queue != NULL)
 		ipmr_do_expire_process(dummy);
 
 	spin_unlock(&mfc_unres_lock);
@@ -852,7 +851,8 @@ ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
 
 	spin_lock_bh(&mfc_unres_lock);
 	for (c = mfc_unres_queue; c; c = c->next) {
-		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
+		if (net_eq(mfc6_net(c), &init_net) &&
+		    ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
 		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
 			break;
 	}
@@ -862,7 +862,7 @@ ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
 	 * Create a new entry if allowable
 	 */
 
-	if (atomic_read(&cache_resolve_queue_len) >= 10 ||
+	if (atomic_read(&init_net.ipv6.cache_resolve_queue_len) >= 10 ||
 	    (c = ip6mr_cache_alloc_unres(&init_net)) == NULL) {
 		spin_unlock_bh(&mfc_unres_lock);
 
@@ -891,7 +891,7 @@ ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
 		return err;
 	}
 
-	atomic_inc(&cache_resolve_queue_len);
+	atomic_inc(&init_net.ipv6.cache_resolve_queue_len);
 	c->next = mfc_unres_queue;
 	mfc_unres_queue = c;
 
@@ -1119,14 +1119,16 @@ static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
 	spin_lock_bh(&mfc_unres_lock);
 	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
 	     cp = &uc->next) {
-		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
+		if (net_eq(mfc6_net(uc), &init_net) &&
+		    ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
 		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
 			*cp = uc->next;
-			if (atomic_dec_and_test(&cache_resolve_queue_len))
-				del_timer(&ipmr_expire_timer);
+			atomic_dec(&init_net.ipv6.cache_resolve_queue_len);
 			break;
 		}
 	}
+	if (mfc_unres_queue == NULL)
+		del_timer(&ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
 	if (uc) {
@@ -1172,18 +1174,18 @@ static void mroute_clean_tables(struct sock *sk)
 		}
 	}
 
-	if (atomic_read(&cache_resolve_queue_len) != 0) {
-		struct mfc6_cache *c;
+	if (atomic_read(&init_net.ipv6.cache_resolve_queue_len) != 0) {
+		struct mfc6_cache *c, **cp;
 
 		spin_lock_bh(&mfc_unres_lock);
-		while (mfc_unres_queue != NULL) {
-			c = mfc_unres_queue;
-			mfc_unres_queue = c->next;
-			spin_unlock_bh(&mfc_unres_lock);
-
+		cp = &mfc_unres_queue;
+		while ((c = *cp) != NULL) {
+			if (!net_eq(mfc6_net(c), &init_net)) {
+				cp = &c->next;
+				continue;
+			}
+			*cp = c->next;
 			ip6mr_destroy_unres(c);
-
-			spin_lock_bh(&mfc_unres_lock);
 		}
 		spin_unlock_bh(&mfc_unres_lock);
 	}
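
As a side note, the rewritten mroute_clean_tables() loop is the classic
double-pointer unlink idiom: walking the list through a struct ... **cp
lets matching entries be removed without special-casing the list head and
without dropping the lock mid-walk. A standalone userspace sketch of the
same pattern (names here are illustrative, not kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		int ns_id;		/* stand-in for the entry's owning netns */
		struct entry *next;
	};

	/* Unlink and free every entry whose ns_id matches, the same shape
	 * as the new mroute_clean_tables() loop for init_net entries. */
	static void remove_matching(struct entry **head, int ns_id)
	{
		struct entry **cp = head;
		struct entry *c;

		while ((c = *cp) != NULL) {
			if (c->ns_id != ns_id) {
				cp = &c->next;	/* keep entry, advance the link */
				continue;
			}
			*cp = c->next;		/* unlink; head needs no special case */
			free(c);
		}
	}

	int main(void)
	{
		struct entry *head = NULL;
		int i;

		for (i = 0; i < 6; i++) {
			struct entry *e = malloc(sizeof(*e));
			e->ns_id = i % 2;
			e->next = head;
			head = e;
		}
		remove_matching(&head, 1);
		for (; head != NULL; head = head->next)
			printf("%d ", head->ns_id);	/* prints: 0 0 0 */
		printf("\n");
		return 0;
	}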