author		David L Stevens <dlstevens@us.ibm.com>	2006-01-24 16:06:39 -0500
committer	David S. Miller <davem@davemloft.net>	2006-01-24 16:06:39 -0500
commit		7add2a439868d636910fb6a216b12c7392778956 (patch)
tree		84cf4e20ce5038cb701373c6d9f97195dfd23dfc /net/ipv6
parent		151bb0ffe51514979abf54063bb5c1dd49365137 (diff)
[IPV6] MLDv2: fix change records when transitioning to/from inactive
The following patch fixes these problems in MLDv2:
1) Add/remove "delete" records for sending change reports when
   addition of a filter results in that filter transitioning to/from
   inactive. [same as the recent IPv4 IGMPv3 fix] A simplified sketch
   of this bookkeeping follows the sign-offs below.
2) Remove 2 redundant "group_type" checks (the type can't be
   IPV6_ADDR_ANY within that loop, so the checks are always true).
3) Change an is_in() "return 0" to "return type == MLD2_MODE_IS_INCLUDE".
   The value should always be 0 when this point is reached, but not
   assuming that improves code locality, and if some race allowed
   otherwise, the check would still return the correct result
   (illustrated in the sketch after the diff).
Signed-off-by: David L Stevens <dlstevens@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
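
The heart of fix #1 is the "delete"-record (tomb list) bookkeeping added to sf_setstate() in the diff below: when a source filter drops out of the active state, a delete record is created on pmc->mca_tomb (or an existing one has its retransmit count re-armed) so the next change report can announce the removal; when the filter becomes active again, any stale delete record is unlinked and freed. What follows is a minimal user-space sketch of that pattern under stated assumptions: sf_entry, the tomb list head, QRV, and malloc/free are illustrative stand-ins for the kernel's ip6_sf_list, pmc->mca_tomb, pmc->idev->mc_qrv, and kmalloc/kfree.

/*
 * User-space model of the delete-record bookkeeping; all names here are
 * stand-ins for the kernel structures, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define QRV 2				/* stand-in for pmc->idev->mc_qrv */

struct sf_entry {
	struct sf_entry *next;
	unsigned char addr[16];		/* IPv6 source address bytes */
	int crcount;			/* change-report retransmissions left */
};

/* Filter became active again: unlink and free any pending delete record. */
static void drop_delete_record(struct sf_entry **tomb, const unsigned char *addr)
{
	struct sf_entry *d, *prev = NULL;

	for (d = *tomb; d; prev = d, d = d->next)
		if (!memcmp(d->addr, addr, 16))
			break;
	if (!d)
		return;
	if (prev)
		prev->next = d->next;
	else
		*tomb = d->next;
	free(d);
}

/* Filter became inactive: add a delete record, or re-arm an existing one. */
static int add_delete_record(struct sf_entry **tomb, const unsigned char *addr)
{
	struct sf_entry *d;

	for (d = *tomb; d; d = d->next)
		if (!memcmp(d->addr, addr, 16))
			break;
	if (!d) {
		d = malloc(sizeof(*d));
		if (!d)
			return -1;	/* the kernel code skips this source instead */
		memcpy(d->addr, addr, 16);
		d->next = *tomb;
		*tomb = d;
	}
	d->crcount = QRV;		/* (re)arm the change-report count */
	return 0;
}

int main(void)
{
	struct sf_entry *tomb = NULL;
	unsigned char src[16] = { 0x20, 0x01 };	/* placeholder source address */

	add_delete_record(&tomb, src);		/* filter went inactive */
	printf("delete record pending: %s\n", tomb ? "yes" : "no");
	drop_delete_record(&tomb, src);		/* filter went active again */
	printf("delete record pending: %s\n", tomb ? "yes" : "no");
	return 0;
}

As in the patch, a failed allocation simply skips the record (the kernel code does "continue" to the next source) rather than failing the whole state update.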
Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/mcast.c	56
1 file changed, 47 insertions(+), 9 deletions(-)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6c05c7978bef..4420948a1bfe 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1252,8 +1252,7 @@ int igmp6_event_query(struct sk_buff *skb)
 		}
 	} else {
 		for (ma = idev->mc_list; ma; ma=ma->next) {
-			if (group_type != IPV6_ADDR_ANY &&
-			    !ipv6_addr_equal(group, &ma->mca_addr))
+			if (!ipv6_addr_equal(group, &ma->mca_addr))
 				continue;
 			spin_lock_bh(&ma->mca_lock);
 			if (ma->mca_flags & MAF_TIMER_RUNNING) {
@@ -1268,11 +1267,10 @@ int igmp6_event_query(struct sk_buff *skb)
 				ma->mca_flags &= ~MAF_GSQUERY;
 			}
 			if (!(ma->mca_flags & MAF_GSQUERY) ||
-			    mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
+				mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
 				igmp6_group_queried(ma, max_delay);
 			spin_unlock_bh(&ma->mca_lock);
-			if (group_type != IPV6_ADDR_ANY)
-				break;
+			break;
 		}
 	}
 	read_unlock_bh(&idev->lock);
@@ -1351,7 +1349,7 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
 			 * in all filters
 			 */
 			if (psf->sf_count[MCAST_INCLUDE])
-				return 0;
+				return type == MLD2_MODE_IS_INCLUDE;
 			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
 				psf->sf_count[MCAST_EXCLUDE];
 		}
@@ -1966,7 +1964,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc)
 
 static int sf_setstate(struct ifmcaddr6 *pmc)
 {
-	struct ip6_sf_list *psf;
+	struct ip6_sf_list *psf, *dpsf;
 	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
 	int qrv = pmc->idev->mc_qrv;
 	int new_in, rv;
@@ -1978,8 +1976,48 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
 				!psf->sf_count[MCAST_INCLUDE];
 		} else
 			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
-		if (new_in != psf->sf_oldin) {
-			psf->sf_crcount = qrv;
+		if (new_in) {
+			if (!psf->sf_oldin) {
+				struct ip6_sf_list *prev = 0;
+
+				for (dpsf=pmc->mca_tomb; dpsf;
+				     dpsf=dpsf->sf_next) {
+					if (ipv6_addr_equal(&dpsf->sf_addr,
+					    &psf->sf_addr))
+						break;
+					prev = dpsf;
+				}
+				if (dpsf) {
+					if (prev)
+						prev->sf_next = dpsf->sf_next;
+					else
+						pmc->mca_tomb = dpsf->sf_next;
+					kfree(dpsf);
+				}
+				psf->sf_crcount = qrv;
+				rv++;
+			}
+		} else if (psf->sf_oldin) {
+			psf->sf_crcount = 0;
+			/*
+			 * add or update "delete" records if an active filter
+			 * is now inactive
+			 */
+			for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
+				if (ipv6_addr_equal(&dpsf->sf_addr,
+				    &psf->sf_addr))
+					break;
+			if (!dpsf) {
+				dpsf = (struct ip6_sf_list *)
+					kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+				if (!dpsf)
+					continue;
+				*dpsf = *psf;
+				/* pmc->mca_lock held by callers */
+				dpsf->sf_next = pmc->mca_tomb;
+				pmc->mca_tomb = dpsf;
+			}
+			dpsf->sf_crcount = qrv;
 			rv++;
 		}
 	}
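
On fix #3: the reasoning in the commit message is that the psf->sf_count[MCAST_INCLUDE] branch of is_in() is only reached when the answer is 0 anyway, so returning type == MLD2_MODE_IS_INCLUDE changes nothing on the reachable path but keeps the branch self-correct if a race ever let a MODE_IS_INCLUDE query get there. Below is a small, hypothetical illustration of that return-value choice; the enum values and the helper name are made up for the sketch and are not the kernel's definitions.

#include <stdio.h>

/* Stand-in record types; values are illustrative, not the kernel's. */
enum { MLD2_MODE_IS_INCLUDE = 1, MLD2_MODE_IS_EXCLUDE = 2 };

/*
 * still_included models psf->sf_count[MCAST_INCLUDE] being nonzero.
 * The old code returned a hard 0 here; the patched form answers per
 * record type, which is identical for IS_EXCLUDE and correct for
 * IS_INCLUDE should that case ever be reached.
 */
static int source_in_record(int type, int still_included)
{
	if (still_included)
		return type == MLD2_MODE_IS_INCLUDE;	/* was: return 0; */
	/* ...rest of the is_in() decision elided... */
	return 0;
}

int main(void)
{
	printf("IS_EXCLUDE: %d\n", source_in_record(MLD2_MODE_IS_EXCLUDE, 1)); /* 0, as before */
	printf("IS_INCLUDE: %d\n", source_in_record(MLD2_MODE_IS_INCLUDE, 1)); /* 1, the defensive case */
	return 0;
}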