author     Benjamin Thery <benjamin.thery@bull.net>   2009-01-21 23:56:19 -0500
committer  David S. Miller <davem@davemloft.net>      2009-01-22 16:57:39 -0500
commit     1e8fb3b6a4ac6c5e486298d88289038456957545 (patch)
tree       ff2c02ed95edcf510e553c15c8abd476cdc6d669 /net/ipv4/ipmr.c
parent     2bb8b26c3ea8bde1943dc5cd4dda2dc9f48fb281 (diff)
netns: ipmr: declare counter cache_resolve_queue_len per-namespace
Preliminary work to make IPv4 multicast routing netns-aware.
Declare the variable cache_resolve_queue_len per-namespace by moving it into
struct netns_ipv4.
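
For illustration, a minimal sketch of the idea; the exact field placement and
config guards in include/net/netns/ipv4.h are not part of this diff, so treat
the layout below as an assumption rather than the real header:

    /* Sketch: the counter that used to be a file-scope static in
     * net/ipv4/ipmr.c now lives in the per-namespace IPv4 state. */
    struct netns_ipv4 {
            /* ... existing per-namespace IPv4 fields ... */
            atomic_t cache_resolve_queue_len; /* unresolved entries owned by this netns */
    };

    /* Call sites change accordingly, as seen in the hunks below:
     *   old: atomic_read(&cache_resolve_queue_len)
     *   new: atomic_read(&init_net.ipv4.cache_resolve_queue_len)
     */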
This variable counts the number of unresolved cache entries queued in the
list mfc_unres_queue. The list itself stays global to all netns: since the
number of entries per namespace is limited to 10 (hardcoded in
ipmr_cache_unresolved()), walking a single shared list and filtering entries
by namespace remains cheap.
Entries belonging to different namespaces in mfc_unres_queue will be
identified by matching the mfc_net member introduced previously in
struct mfc_cache.
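
The lookup pattern this enables is sketched below; it mirrors the hunk in
ipmr_cache_unresolved() further down, where iph is the IPv4 header of the
packet being resolved and the namespace is still hard-wired to &init_net at
this stage of the series:

    /* Sketch: walk the single shared unresolved queue, but only match
     * entries owned by the namespace of interest. */
    struct mfc_cache *c;

    for (c = mfc_unres_queue; c; c = c->next) {
            if (net_eq(mfc_net(c), &init_net) &&
                c->mfc_mcastgrp == iph->daddr &&
                c->mfc_origin == iph->saddr)
                    break; /* pending entry for this (S,G) in this netns */
    }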
Keeping this list global to all netns also allows us to keep a single timer
(ipmr_expire_timer) to handle the expiration of its entries.
In some places the value of cache_resolve_queue_len was tested to decide
whether to arm or delete the timer. Those tests are equivalent to checking
whether mfc_unres_queue is empty, so this patch replaces them with that
check.
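
Concretely, the timer re-arming test in ipmr_expire_process() changes along
these lines (both forms appear in the hunks below):

    /* Before: keyed off the (formerly global) counter. */
    if (atomic_read(&cache_resolve_queue_len))
            mod_timer(&ipmr_expire_timer, jiffies + expires);

    /* After: an empty shared queue is the condition that actually matters,
     * and it holds no matter which namespace owns the entries. */
    if (mfc_unres_queue != NULL)
            mod_timer(&ipmr_expire_timer, jiffies + expires);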
At the moment, cache_resolve_queue_len is only referenced in init_net.
Signed-off-by: Benjamin Thery <benjamin.thery@bull.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/ipmr.c')
 net/ipv4/ipmr.c | 39 +++++++++++++++++++++------------------
 1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 35b868dd3bfd..feafd14eb7b9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -83,7 +83,6 @@ static int mroute_do_assert;              /* Set in PIM assert    */
 static int mroute_do_pim;
 
 static struct mfc_cache *mfc_unres_queue;          /* Queue of unresolved entries */
-static atomic_t cache_resolve_queue_len;           /* Size of unresolved          */
 
 /* Special spinlock for queue of unresolved entries */
 static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -340,7 +339,7 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
        struct sk_buff *skb;
        struct nlmsgerr *e;
 
-       atomic_dec(&cache_resolve_queue_len);
+       atomic_dec(&init_net.ipv4.cache_resolve_queue_len);
 
        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
@@ -374,7 +373,7 @@ static void ipmr_expire_process(unsigned long dummy)
                return;
        }
 
-       if (atomic_read(&cache_resolve_queue_len) == 0)
+       if (mfc_unres_queue == NULL)
                goto out;
 
        now = jiffies;
@@ -395,7 +394,7 @@ static void ipmr_expire_process(unsigned long dummy)
                ipmr_destroy_unres(c);
        }
 
-       if (atomic_read(&cache_resolve_queue_len))
+       if (mfc_unres_queue != NULL)
                mod_timer(&ipmr_expire_timer, jiffies + expires);
 
 out:
@@ -690,7 +689,8 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 
        spin_lock_bh(&mfc_unres_lock);
        for (c=mfc_unres_queue; c; c=c->next) {
-               if (c->mfc_mcastgrp == iph->daddr &&
+               if (net_eq(mfc_net(c), &init_net) &&
+                   c->mfc_mcastgrp == iph->daddr &&
                    c->mfc_origin == iph->saddr)
                        break;
        }
@@ -700,7 +700,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
         *      Create a new entry if allowable
         */
 
-       if (atomic_read(&cache_resolve_queue_len) >= 10 ||
+       if (atomic_read(&init_net.ipv4.cache_resolve_queue_len) >= 10 ||
            (c = ipmr_cache_alloc_unres(&init_net)) == NULL) {
                spin_unlock_bh(&mfc_unres_lock);
 
@@ -729,7 +729,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
                return err;
        }
 
-       atomic_inc(&cache_resolve_queue_len);
+       atomic_inc(&init_net.ipv4.cache_resolve_queue_len);
        c->next = mfc_unres_queue;
        mfc_unres_queue = c;
 
@@ -827,14 +827,16 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
        spin_lock_bh(&mfc_unres_lock);
        for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
             cp = &uc->next) {
-               if (uc->mfc_origin == c->mfc_origin &&
+               if (net_eq(mfc_net(uc), &init_net) &&
+                   uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
                        *cp = uc->next;
-                       if (atomic_dec_and_test(&cache_resolve_queue_len))
-                               del_timer(&ipmr_expire_timer);
+                       atomic_dec(&init_net.ipv4.cache_resolve_queue_len);
                        break;
                }
        }
+       if (mfc_unres_queue == NULL)
+               del_timer(&ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);
 
        if (uc) {
@@ -880,18 +882,19 @@ static void mroute_clean_tables(struct sock *sk)
                }
        }
 
-       if (atomic_read(&cache_resolve_queue_len) != 0) {
-               struct mfc_cache *c;
+       if (atomic_read(&init_net.ipv4.cache_resolve_queue_len) != 0) {
+               struct mfc_cache *c, **cp;
 
                spin_lock_bh(&mfc_unres_lock);
-               while (mfc_unres_queue != NULL) {
-                       c = mfc_unres_queue;
-                       mfc_unres_queue = c->next;
-                       spin_unlock_bh(&mfc_unres_lock);
+               cp = &mfc_unres_queue;
+               while ((c = *cp) != NULL) {
+                       if (!net_eq(mfc_net(c), &init_net)) {
+                               cp = &c->next;
+                               continue;
+                       }
+                       *cp = c->next;
 
                        ipmr_destroy_unres(c);
-
-                       spin_lock_bh(&mfc_unres_lock);
                }
                spin_unlock_bh(&mfc_unres_lock);
        }