-rw-r--r--   include/net/netns/ipv6.h |  1
-rw-r--r--   net/ipv6/ip6mr.c         | 40
2 files changed, 22 insertions, 19 deletions
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 14c1bbe68a85..30572f3f9781 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -60,6 +60,7 @@ struct netns_ipv6 {
 	struct mfc6_cache	**mfc6_cache_array;
 	struct mif_device	*vif6_table;
 	int			maxvif;
+	atomic_t		cache_resolve_queue_len;
 #endif
 };
 #endif
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 287e526ba036..077c8198eb53 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -69,7 +69,6 @@ static int mroute_do_pim;
 #endif
 
 static struct mfc6_cache *mfc_unres_queue;		/* Queue of unresolved entries */
-static atomic_t cache_resolve_queue_len;		/* Size of unresolved */
 
 /* Special spinlock for queue of unresolved entries */
 static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -519,7 +518,7 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c)
 {
 	struct sk_buff *skb;
 
-	atomic_dec(&cache_resolve_queue_len);
+	atomic_dec(&init_net.ipv6.cache_resolve_queue_len);
 
 	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
 		if (ipv6_hdr(skb)->version == 0) {
@@ -561,7 +560,7 @@ static void ipmr_do_expire_process(unsigned long dummy)
 		ip6mr_destroy_unres(c);
 	}
 
-	if (atomic_read(&cache_resolve_queue_len))
+	if (mfc_unres_queue != NULL)
 		mod_timer(&ipmr_expire_timer, jiffies + expires);
 }
 
@@ -572,7 +571,7 @@ static void ipmr_expire_process(unsigned long dummy)
 		return;
 	}
 
-	if (atomic_read(&cache_resolve_queue_len))
+	if (mfc_unres_queue != NULL)
 		ipmr_do_expire_process(dummy);
 
 	spin_unlock(&mfc_unres_lock);
@@ -852,7 +851,8 @@ ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
 
 	spin_lock_bh(&mfc_unres_lock);
 	for (c = mfc_unres_queue; c; c = c->next) {
-		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
+		if (net_eq(mfc6_net(c), &init_net) &&
+		    ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
 		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
 			break;
 	}
@@ -862,7 +862,7 @@ ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
 	 *	Create a new entry if allowable
 	 */
 
-	if (atomic_read(&cache_resolve_queue_len) >= 10 ||
+	if (atomic_read(&init_net.ipv6.cache_resolve_queue_len) >= 10 ||
 	    (c = ip6mr_cache_alloc_unres(&init_net)) == NULL) {
 		spin_unlock_bh(&mfc_unres_lock);
 
@@ -891,7 +891,7 @@ ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
 		return err;
 	}
 
-	atomic_inc(&cache_resolve_queue_len);
+	atomic_inc(&init_net.ipv6.cache_resolve_queue_len);
 	c->next = mfc_unres_queue;
 	mfc_unres_queue = c;
 
@@ -1119,14 +1119,16 @@ static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
 	spin_lock_bh(&mfc_unres_lock);
 	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
 	     cp = &uc->next) {
-		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
+		if (net_eq(mfc6_net(uc), &init_net) &&
+		    ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
 		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
 			*cp = uc->next;
-			if (atomic_dec_and_test(&cache_resolve_queue_len))
-				del_timer(&ipmr_expire_timer);
+			atomic_dec(&init_net.ipv6.cache_resolve_queue_len);
 			break;
 		}
 	}
+	if (mfc_unres_queue == NULL)
+		del_timer(&ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
 	if (uc) {
@@ -1172,18 +1174,18 @@ static void mroute_clean_tables(struct sock *sk)
 		}
 	}
 
-	if (atomic_read(&cache_resolve_queue_len) != 0) {
-		struct mfc6_cache *c;
+	if (atomic_read(&init_net.ipv6.cache_resolve_queue_len) != 0) {
+		struct mfc6_cache *c, **cp;
 
 		spin_lock_bh(&mfc_unres_lock);
-		while (mfc_unres_queue != NULL) {
-			c = mfc_unres_queue;
-			mfc_unres_queue = c->next;
-			spin_unlock_bh(&mfc_unres_lock);
-
+		cp = &mfc_unres_queue;
+		while ((c = *cp) != NULL) {
+			if (!net_eq(mfc6_net(c), &init_net)) {
+				cp = &c->next;
+				continue;
+			}
+			*cp = c->next;
 			ip6mr_destroy_unres(c);
-
-			spin_lock_bh(&mfc_unres_lock);
 		}
 		spin_unlock_bh(&mfc_unres_lock);
 	}
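The core of the patch is that the unresolved-cache counter moves from a file-static atomic_t in ip6mr.c into struct netns_ipv6, so each network namespace can track its own count; until the rest of ip6mr is namespaced, all accesses go through init_net. The standalone userspace sketch below is not part of the patch: struct net, struct netns_ipv6 and the helper names are simplified stand-ins, and C11 atomics replace the kernel's atomic_t, but it illustrates the same "global counter becomes per-namespace field" pattern.

/*
 * Sketch only: a counter that used to be a single global is moved into a
 * per-namespace structure, so each namespace counts its own unresolved
 * entries.  Build with: cc -std=c11 -o demo demo.c
 */
#include <stdatomic.h>
#include <stdio.h>

struct netns_ipv6 {
	atomic_int cache_resolve_queue_len;	/* was: one static atomic for the whole system */
};

struct net {
	struct netns_ipv6 ipv6;
};

/* Stand-in for the kernel's initial namespace, init_net. */
static struct net init_net;

/* Queueing an unresolved entry bumps the counter of that namespace only. */
static void queue_unresolved(struct net *net)
{
	atomic_fetch_add(&net->ipv6.cache_resolve_queue_len, 1);
}

/* Resolving or destroying an entry decrements the same per-namespace counter. */
static void destroy_unresolved(struct net *net)
{
	atomic_fetch_sub(&net->ipv6.cache_resolve_queue_len, 1);
}

int main(void)
{
	queue_unresolved(&init_net);
	queue_unresolved(&init_net);
	destroy_unresolved(&init_net);

	printf("unresolved entries in init_net: %d\n",
	       atomic_load(&init_net.ipv6.cache_resolve_queue_len));
	return 0;
}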