aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6/mcast.c
diff options
context:
space:
mode:
authorFlavio Leitner <fbl@redhat.com>2014-01-16 16:27:59 -0500
committerDavid S. Miller <davem@davemloft.net>2014-01-17 21:12:29 -0500
commit6a7cc41872dd46719fde09756e55e870cd453da8 (patch)
tree5fe2f208acd35fffae7787800cd9832a5c1a71a6 /net/ipv6/mcast.c
parentc3bc40e28be45cb168bae2b0b0a0731742eb4fa9 (diff)
ipv6: send Change Status Report after DAD is completed
RFC 3810 defines two types of messages for multicast listeners. The "Current State Report" message, as the name implies, refreshes the *current* state to the querier. Since the querier sends Query messages periodically, there is no need to retransmit the report. On the other hand, any change should be reported immediately using "State Change Report" messages. Since it's an event triggered by a change, and since it can be affected by packet loss, the RFC states it should be retransmitted [RobVar] times to make sure routers will receive it in a timely manner. Currently, we are sending "Current State Reports" after DAD is completed. Before that, we send messages using the unspecified address (::), which should be silently discarded by routers. This patch changes the code to send "State Change Report" messages after DAD is completed, fixing the behavior to be RFC compliant and also to pass the TAHI IPv6 test suite. Signed-off-by: Flavio Leitner <fbl@redhat.com> Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/mcast.c')
-rw-r--r--net/ipv6/mcast.c57
1 file changed, 33 insertions, 24 deletions
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7ff82b3e54b6..e1e47350784b 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1665,7 +1665,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1665 skb_tailroom(skb)) : 0) 1665 skb_tailroom(skb)) : 0)
1666 1666
1667static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, 1667static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1668 int type, int gdeleted, int sdeleted) 1668 int type, int gdeleted, int sdeleted, int crsend)
1669{ 1669{
1670 struct inet6_dev *idev = pmc->idev; 1670 struct inet6_dev *idev = pmc->idev;
1671 struct net_device *dev = idev->dev; 1671 struct net_device *dev = idev->dev;
@@ -1757,7 +1757,7 @@ empty_source:
1757 if (type == MLD2_ALLOW_NEW_SOURCES || 1757 if (type == MLD2_ALLOW_NEW_SOURCES ||
1758 type == MLD2_BLOCK_OLD_SOURCES) 1758 type == MLD2_BLOCK_OLD_SOURCES)
1759 return skb; 1759 return skb;
1760 if (pmc->mca_crcount || isquery) { 1760 if (pmc->mca_crcount || isquery || crsend) {
1761 /* make sure we have room for group header */ 1761 /* make sure we have room for group header */
1762 if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) { 1762 if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
1763 mld_sendpack(skb); 1763 mld_sendpack(skb);
@@ -1789,7 +1789,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1789 type = MLD2_MODE_IS_EXCLUDE; 1789 type = MLD2_MODE_IS_EXCLUDE;
1790 else 1790 else
1791 type = MLD2_MODE_IS_INCLUDE; 1791 type = MLD2_MODE_IS_INCLUDE;
1792 skb = add_grec(skb, pmc, type, 0, 0); 1792 skb = add_grec(skb, pmc, type, 0, 0, 0);
1793 spin_unlock_bh(&pmc->mca_lock); 1793 spin_unlock_bh(&pmc->mca_lock);
1794 } 1794 }
1795 } else { 1795 } else {
@@ -1798,7 +1798,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
1798 type = MLD2_MODE_IS_EXCLUDE; 1798 type = MLD2_MODE_IS_EXCLUDE;
1799 else 1799 else
1800 type = MLD2_MODE_IS_INCLUDE; 1800 type = MLD2_MODE_IS_INCLUDE;
1801 skb = add_grec(skb, pmc, type, 0, 0); 1801 skb = add_grec(skb, pmc, type, 0, 0, 0);
1802 spin_unlock_bh(&pmc->mca_lock); 1802 spin_unlock_bh(&pmc->mca_lock);
1803 } 1803 }
1804 read_unlock_bh(&idev->lock); 1804 read_unlock_bh(&idev->lock);
@@ -1843,13 +1843,13 @@ static void mld_send_cr(struct inet6_dev *idev)
1843 if (pmc->mca_sfmode == MCAST_INCLUDE) { 1843 if (pmc->mca_sfmode == MCAST_INCLUDE) {
1844 type = MLD2_BLOCK_OLD_SOURCES; 1844 type = MLD2_BLOCK_OLD_SOURCES;
1845 dtype = MLD2_BLOCK_OLD_SOURCES; 1845 dtype = MLD2_BLOCK_OLD_SOURCES;
1846 skb = add_grec(skb, pmc, type, 1, 0); 1846 skb = add_grec(skb, pmc, type, 1, 0, 0);
1847 skb = add_grec(skb, pmc, dtype, 1, 1); 1847 skb = add_grec(skb, pmc, dtype, 1, 1, 0);
1848 } 1848 }
1849 if (pmc->mca_crcount) { 1849 if (pmc->mca_crcount) {
1850 if (pmc->mca_sfmode == MCAST_EXCLUDE) { 1850 if (pmc->mca_sfmode == MCAST_EXCLUDE) {
1851 type = MLD2_CHANGE_TO_INCLUDE; 1851 type = MLD2_CHANGE_TO_INCLUDE;
1852 skb = add_grec(skb, pmc, type, 1, 0); 1852 skb = add_grec(skb, pmc, type, 1, 0, 0);
1853 } 1853 }
1854 pmc->mca_crcount--; 1854 pmc->mca_crcount--;
1855 if (pmc->mca_crcount == 0) { 1855 if (pmc->mca_crcount == 0) {
@@ -1880,8 +1880,8 @@ static void mld_send_cr(struct inet6_dev *idev)
1880 type = MLD2_ALLOW_NEW_SOURCES; 1880 type = MLD2_ALLOW_NEW_SOURCES;
1881 dtype = MLD2_BLOCK_OLD_SOURCES; 1881 dtype = MLD2_BLOCK_OLD_SOURCES;
1882 } 1882 }
1883 skb = add_grec(skb, pmc, type, 0, 0); 1883 skb = add_grec(skb, pmc, type, 0, 0, 0);
1884 skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */ 1884 skb = add_grec(skb, pmc, dtype, 0, 1, 0); /* deleted sources */
1885 1885
1886 /* filter mode changes */ 1886 /* filter mode changes */
1887 if (pmc->mca_crcount) { 1887 if (pmc->mca_crcount) {
@@ -1889,7 +1889,7 @@ static void mld_send_cr(struct inet6_dev *idev)
1889 type = MLD2_CHANGE_TO_EXCLUDE; 1889 type = MLD2_CHANGE_TO_EXCLUDE;
1890 else 1890 else
1891 type = MLD2_CHANGE_TO_INCLUDE; 1891 type = MLD2_CHANGE_TO_INCLUDE;
1892 skb = add_grec(skb, pmc, type, 0, 0); 1892 skb = add_grec(skb, pmc, type, 0, 0, 0);
1893 pmc->mca_crcount--; 1893 pmc->mca_crcount--;
1894 } 1894 }
1895 spin_unlock_bh(&pmc->mca_lock); 1895 spin_unlock_bh(&pmc->mca_lock);
@@ -1997,27 +1997,36 @@ err_out:
1997 goto out; 1997 goto out;
1998} 1998}
1999 1999
2000static void mld_resend_report(struct inet6_dev *idev) 2000static void mld_send_initial_cr(struct inet6_dev *idev)
2001{ 2001{
2002 if (mld_in_v1_mode(idev)) { 2002 struct sk_buff *skb;
2003 struct ifmcaddr6 *mcaddr; 2003 struct ifmcaddr6 *pmc;
2004 read_lock_bh(&idev->lock); 2004 int type;
2005 for (mcaddr = idev->mc_list; mcaddr; mcaddr = mcaddr->next) { 2005
2006 if (!(mcaddr->mca_flags & MAF_NOREPORT)) 2006 if (mld_in_v1_mode(idev))
2007 igmp6_send(&mcaddr->mca_addr, idev->dev, 2007 return;
2008 ICMPV6_MGM_REPORT); 2008
2009 } 2009 skb = NULL;
2010 read_unlock_bh(&idev->lock); 2010 read_lock_bh(&idev->lock);
2011 } else { 2011 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
2012 mld_send_report(idev, NULL); 2012 spin_lock_bh(&pmc->mca_lock);
2013 if (pmc->mca_sfcount[MCAST_EXCLUDE])
2014 type = MLD2_CHANGE_TO_EXCLUDE;
2015 else
2016 type = MLD2_CHANGE_TO_INCLUDE;
2017 skb = add_grec(skb, pmc, type, 0, 0, 1);
2018 spin_unlock_bh(&pmc->mca_lock);
2013 } 2019 }
2020 read_unlock_bh(&idev->lock);
2021 if (skb)
2022 mld_sendpack(skb);
2014} 2023}
2015 2024
2016void ipv6_mc_dad_complete(struct inet6_dev *idev) 2025void ipv6_mc_dad_complete(struct inet6_dev *idev)
2017{ 2026{
2018 idev->mc_dad_count = idev->mc_qrv; 2027 idev->mc_dad_count = idev->mc_qrv;
2019 if (idev->mc_dad_count) { 2028 if (idev->mc_dad_count) {
2020 mld_resend_report(idev); 2029 mld_send_initial_cr(idev);
2021 idev->mc_dad_count--; 2030 idev->mc_dad_count--;
2022 if (idev->mc_dad_count) 2031 if (idev->mc_dad_count)
2023 mld_dad_start_timer(idev, idev->mc_maxdelay); 2032 mld_dad_start_timer(idev, idev->mc_maxdelay);
@@ -2028,7 +2037,7 @@ static void mld_dad_timer_expire(unsigned long data)
2028{ 2037{
2029 struct inet6_dev *idev = (struct inet6_dev *)data; 2038 struct inet6_dev *idev = (struct inet6_dev *)data;
2030 2039
2031 mld_resend_report(idev); 2040 mld_send_initial_cr(idev);
2032 if (idev->mc_dad_count) { 2041 if (idev->mc_dad_count) {
2033 idev->mc_dad_count--; 2042 idev->mc_dad_count--;
2034 if (idev->mc_dad_count) 2043 if (idev->mc_dad_count)