author     David L Stevens <dlstevens@us.ibm.com>  2005-12-27 17:03:00 -0500
committer  David S. Miller <davem@davemloft.net>   2005-12-27 17:03:00 -0500
commit     5ab4a6c81eb3dbe32361791d1535f9153f79b0ed (patch)
tree       c89504389bf1a3452a499db9ea19acea76941616
parent     1b93ae64cabe5e28dd5a1f35f96f938ca4f6ae20 (diff)
[IPV6] mcast: Fix multiple issues in MLDv2 reports.
The below "jumbo" patch fixes the following problems in MLDv2.

1) Add necessary "ntohs" to recent "pskb_may_pull" check
   [breaks all nonzero source queries on little-endian (!)]

2) Add locking to source filter list [resend of prior patch]

3) fix "mld_marksources()" to
   a) send nothing when all queried sources are excluded
   b) send a full exclude report when queried sources are not excluded
   c) don't schedule a timer when there's nothing to report

   NOTE: RFC 3810 specifies the source list should be saved and each
   source reported individually as an IS_IN. This is an obvious DOS
   path, requiring the host to store and then multicast as many sources
   as are queried (e.g., millions...). This alternative sends a full,
   relevant report that's limited to the number of sources present on
   the machine.

4) fix "add_grec()" to send empty-source records when it should
   The original check doesn't account for a non-empty source list with
   all sources inactive; the new code keeps that short-circuit case, and
   also generates the group header with an empty list if needed.

5) fix the mca_crcount decrement to come after add_grec(), which needs
   its original value

These issues (other than item #1 ;-) ) were all found by Yan Zheng,
much thanks!

Signed-off-by: David L Stevens <dlstevens@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
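[Editor's note on item 1: the MLDv2 query carries nsrcs as a 16-bit field in network byte order, so on a little-endian host it must go through ntohs() before being used in the length arithmetic passed to pskb_may_pull(). The following standalone userspace sketch is illustrative only, not kernel code; the variable names are made up. It shows the size the buggy and fixed arithmetic would compute:]

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
        /* a query advertising 2 sources, as the 16-bit count appears on the wire */
        uint16_t nsrcs_be = htons(2);

        /* buggy on little-endian: uses the byte-swapped value (512) as the count */
        size_t bad  = (size_t)nsrcs_be * sizeof(struct in6_addr);

        /* fixed: convert to host byte order first, as the patch does */
        size_t good = (size_t)ntohs(nsrcs_be) * sizeof(struct in6_addr);

        printf("without ntohs: %zu bytes, with ntohs: %zu bytes\n", bad, good);
        return 0;
}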
-rw-r--r--  include/net/if_inet6.h    1
-rw-r--r--  net/ipv6/mcast.c        140
2 files changed, 111 insertions, 30 deletions
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d8234f9bd4c4..eb8afe3499a9 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -83,6 +83,7 @@ struct ipv6_mc_socklist
         struct in6_addr         addr;
         int                     ifindex;
         struct ipv6_mc_socklist *next;
+        rwlock_t                sflock;
         unsigned int            sfmode;         /* MCAST_{INCLUDE,EXCLUDE} */
         struct ip6_sf_socklist  *sflist;
 };
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 057d8619ba13..f829a4ad3ccc 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -224,6 +224,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr)
 
         mc_lst->ifindex = dev->ifindex;
         mc_lst->sfmode = MCAST_EXCLUDE;
+        mc_lst->sflock = RW_LOCK_UNLOCKED;
         mc_lst->sflist = NULL;
 
         /*
@@ -360,6 +361,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
         struct ip6_sf_socklist *psl;
         int i, j, rv;
         int leavegroup = 0;
+        int pmclocked = 0;
         int err;
 
         if (pgsr->gsr_group.ss_family != AF_INET6 ||
@@ -403,6 +405,9 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
                 pmc->sfmode = omode;
         }
 
+        write_lock_bh(&pmc->sflock);
+        pmclocked = 1;
+
         psl = pmc->sflist;
         if (!add) {
                 if (!psl)
@@ -475,6 +480,8 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
         /* update the interface list */
         ip6_mc_add_src(idev, group, omode, 1, source, 1);
 done:
+        if (pmclocked)
+                write_unlock_bh(&pmc->sflock);
         read_unlock_bh(&ipv6_sk_mc_lock);
         read_unlock_bh(&idev->lock);
         in6_dev_put(idev);
@@ -510,6 +517,8 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
         dev = idev->dev;
 
         err = 0;
+        read_lock_bh(&ipv6_sk_mc_lock);
+
         if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
                 leavegroup = 1;
                 goto done;
@@ -549,6 +558,8 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
                 newpsl = NULL;
                 (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
         }
+
+        write_lock_bh(&pmc->sflock);
         psl = pmc->sflist;
         if (psl) {
                 (void) ip6_mc_del_src(idev, group, pmc->sfmode,
@@ -558,8 +569,10 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
                 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
         pmc->sflist = newpsl;
         pmc->sfmode = gsf->gf_fmode;
+        write_unlock_bh(&pmc->sflock);
         err = 0;
 done:
+        read_unlock_bh(&ipv6_sk_mc_lock);
         read_unlock_bh(&idev->lock);
         in6_dev_put(idev);
         dev_put(dev);
@@ -592,6 +605,11 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
         dev = idev->dev;
 
         err = -EADDRNOTAVAIL;
+        /*
+         * changes to the ipv6_mc_list require the socket lock and
+         * a read lock on ip6_sk_mc_lock. We have the socket lock,
+         * so reading the list is safe.
+         */
 
         for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
                 if (pmc->ifindex != gsf->gf_interface)
@@ -614,6 +632,10 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
             copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
                 return -EFAULT;
         }
+        /* changes to psl require the socket lock, a read lock on
+         * on ipv6_sk_mc_lock and a write lock on pmc->sflock. We
+         * have the socket lock, so reading here is safe.
+         */
         for (i=0; i<copycount; i++) {
                 struct sockaddr_in6 *psin6;
                 struct sockaddr_storage ss;
@@ -650,6 +672,7 @@ int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr,
                 read_unlock(&ipv6_sk_mc_lock);
                 return 1;
         }
+        read_lock(&mc->sflock);
         psl = mc->sflist;
         if (!psl) {
                 rv = mc->sfmode == MCAST_EXCLUDE;
@@ -665,6 +688,7 @@ int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr,
                 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
                         rv = 0;
         }
+        read_unlock(&mc->sflock);
         read_unlock(&ipv6_sk_mc_lock);
 
         return rv;
@@ -1068,7 +1092,8 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
         ma->mca_flags |= MAF_TIMER_RUNNING;
 }
 
-static void mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
+/* mark EXCLUDE-mode sources */
+static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
         struct in6_addr *srcs)
 {
         struct ip6_sf_list *psf;
@@ -1078,13 +1103,53 @@ static void mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
         for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
                 if (scount == nsrcs)
                         break;
-                for (i=0; i<nsrcs; i++)
+                for (i=0; i<nsrcs; i++) {
+                        /* skip inactive filters */
+                        if (pmc->mca_sfcount[MCAST_INCLUDE] ||
+                            pmc->mca_sfcount[MCAST_EXCLUDE] !=
+                            psf->sf_count[MCAST_EXCLUDE])
+                                continue;
+                        if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
+                                scount++;
+                                break;
+                        }
+                }
+        }
+        pmc->mca_flags &= ~MAF_GSQUERY;
+        if (scount == nsrcs)        /* all sources excluded */
+                return 0;
+        return 1;
+}
+
+static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
+        struct in6_addr *srcs)
+{
+        struct ip6_sf_list *psf;
+        int i, scount;
+
+        if (pmc->mca_sfmode == MCAST_EXCLUDE)
+                return mld_xmarksources(pmc, nsrcs, srcs);
+
+        /* mark INCLUDE-mode sources */
+
+        scount = 0;
+        for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
+                if (scount == nsrcs)
+                        break;
+                for (i=0; i<nsrcs; i++) {
                         if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
                                 psf->sf_gsresp = 1;
                                 scount++;
                                 break;
                         }
+                }
+        }
+        if (!scount) {
+                pmc->mca_flags &= ~MAF_GSQUERY;
+                return 0;
         }
+        pmc->mca_flags |= MAF_GSQUERY;
+        return 1;
 }
 
 int igmp6_event_query(struct sk_buff *skb)
@@ -1167,7 +1232,7 @@ int igmp6_event_query(struct sk_buff *skb)
                 /* mark sources to include, if group & source-specific */
                 if (mlh2->nsrcs != 0) {
                         if (!pskb_may_pull(skb, srcs_offset +
-                            mlh2->nsrcs * sizeof(struct in6_addr))) {
+                            ntohs(mlh2->nsrcs) * sizeof(struct in6_addr))) {
                                 in6_dev_put(idev);
                                 return -EINVAL;
                         }
@@ -1203,10 +1268,9 @@ int igmp6_event_query(struct sk_buff *skb)
                         else
                                 ma->mca_flags &= ~MAF_GSQUERY;
                 }
-                if (ma->mca_flags & MAF_GSQUERY)
-                        mld_marksources(ma, ntohs(mlh2->nsrcs),
-                                mlh2->srcs);
-                igmp6_group_queried(ma, max_delay);
+                if (!(ma->mca_flags & MAF_GSQUERY) ||
+                    mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
+                        igmp6_group_queried(ma, max_delay);
                 spin_unlock_bh(&ma->mca_lock);
                 if (group_type != IPV6_ADDR_ANY)
                         break;
@@ -1281,7 +1345,18 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
         case MLD2_MODE_IS_EXCLUDE:
                 if (gdeleted || sdeleted)
                         return 0;
-                return !((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp);
+                if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
+                        if (pmc->mca_sfmode == MCAST_INCLUDE)
+                                return 1;
+                        /* don't include if this source is excluded
+                         * in all filters
+                         */
+                        if (psf->sf_count[MCAST_INCLUDE])
+                                return 0;
+                        return pmc->mca_sfcount[MCAST_EXCLUDE] ==
+                                psf->sf_count[MCAST_EXCLUDE];
+                }
+                return 0;
         case MLD2_CHANGE_TO_INCLUDE:
                 if (gdeleted || sdeleted)
                         return 0;
@@ -1450,7 +1525,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
         struct mld2_report *pmr;
         struct mld2_grec *pgr = NULL;
         struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
-        int scount, first, isquery, truncate;
+        int scount, stotal, first, isquery, truncate;
 
         if (pmc->mca_flags & MAF_NOREPORT)
                 return skb;
@@ -1460,25 +1535,13 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
         truncate = type == MLD2_MODE_IS_EXCLUDE ||
                     type == MLD2_CHANGE_TO_EXCLUDE;
 
+        stotal = scount = 0;
+
         psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
 
-        if (!*psf_list) {
-                if (type == MLD2_ALLOW_NEW_SOURCES ||
-                    type == MLD2_BLOCK_OLD_SOURCES)
-                        return skb;
-                if (pmc->mca_crcount || isquery) {
-                        /* make sure we have room for group header and at
-                         * least one source.
-                         */
-                        if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)+
-                            sizeof(struct in6_addr)) {
-                                mld_sendpack(skb);
-                                skb = NULL; /* add_grhead will get a new one */
-                        }
-                        skb = add_grhead(skb, pmc, type, &pgr);
-                }
-                return skb;
-        }
+        if (!*psf_list)
+                goto empty_source;
+
         pmr = skb ? (struct mld2_report *)skb->h.raw : NULL;
 
         /* EX and TO_EX get a fresh packet, if needed */
@@ -1491,7 +1554,6 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
                 }
         }
         first = 1;
-        scount = 0;
         psf_prev = NULL;
         for (psf=*psf_list; psf; psf=psf_next) {
                 struct in6_addr *psrc;
@@ -1525,7 +1587,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
                 }
                 psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
                 *psrc = psf->sf_addr;
-                scount++;
+                scount++; stotal++;
                 if ((type == MLD2_ALLOW_NEW_SOURCES ||
                      type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
                         psf->sf_crcount--;
@@ -1540,6 +1602,21 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
                 }
                 psf_prev = psf;
         }
+
+empty_source:
+        if (!stotal) {
+                if (type == MLD2_ALLOW_NEW_SOURCES ||
+                    type == MLD2_BLOCK_OLD_SOURCES)
+                        return skb;
+                if (pmc->mca_crcount || isquery) {
+                        /* make sure we have room for group header */
+                        if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
+                                mld_sendpack(skb);
+                                skb = NULL; /* add_grhead will get a new one */
+                        }
+                        skb = add_grhead(skb, pmc, type, &pgr);
+                }
+        }
         if (pgr)
                 pgr->grec_nsrcs = htons(scount);
 
@@ -1621,11 +1698,11 @@ static void mld_send_cr(struct inet6_dev *idev)
                         skb = add_grec(skb, pmc, dtype, 1, 1);
                 }
                 if (pmc->mca_crcount) {
-                        pmc->mca_crcount--;
                         if (pmc->mca_sfmode == MCAST_EXCLUDE) {
                                 type = MLD2_CHANGE_TO_INCLUDE;
                                 skb = add_grec(skb, pmc, type, 1, 0);
                         }
+                        pmc->mca_crcount--;
                         if (pmc->mca_crcount == 0) {
                                 mld_clear_zeros(&pmc->mca_tomb);
                                 mld_clear_zeros(&pmc->mca_sources);
@@ -1659,12 +1736,12 @@ static void mld_send_cr(struct inet6_dev *idev)
 
                 /* filter mode changes */
                 if (pmc->mca_crcount) {
-                        pmc->mca_crcount--;
                         if (pmc->mca_sfmode == MCAST_EXCLUDE)
                                 type = MLD2_CHANGE_TO_EXCLUDE;
                         else
                                 type = MLD2_CHANGE_TO_INCLUDE;
                         skb = add_grec(skb, pmc, type, 0, 0);
+                        pmc->mca_crcount--;
                 }
                 spin_unlock_bh(&pmc->mca_lock);
         }
@@ -2023,6 +2100,9 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
 {
         int err;
 
+        /* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
+         * so no other readers or writers of iml or its sflist
+         */
         if (iml->sflist == 0) {
                 /* any-source empty exclude case */
                 return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
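[Editor's note on item 2 of the commit message above: the new per-socket rwlock_t sflock follows the usual reader/writer pattern, with lookups such as inet6_mc_check() taking it for read and filter updates in ip6_mc_source()/ip6_mc_msfilter() taking it for write. Below is a minimal userspace sketch of that pattern; the filter_list type and helper names are hypothetical stand-ins for the kernel's source-filter state, not its actual API.]

#include <netinet/in.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for the per-socket source filter state */
struct filter_list {
        pthread_rwlock_t lock;          /* plays the role of sflock */
        struct in6_addr *srcs;          /* plays the role of sflist */
        size_t count;
};

/* example instance; statically initialized lock */
static struct filter_list fl = { .lock = PTHREAD_RWLOCK_INITIALIZER };

/* reader side: analogous to inet6_mc_check() scanning the list */
static int filter_contains(struct filter_list *f, const struct in6_addr *a)
{
        int found = 0;
        size_t i;

        pthread_rwlock_rdlock(&f->lock);
        for (i = 0; i < f->count; i++) {
                if (memcmp(&f->srcs[i], a, sizeof(*a)) == 0) {
                        found = 1;
                        break;
                }
        }
        pthread_rwlock_unlock(&f->lock);
        return found;
}

/* writer side: analogous to ip6_mc_source()/ip6_mc_msfilter() growing or
 * replacing the list under the write lock
 */
static int filter_add(struct filter_list *f, const struct in6_addr *a)
{
        struct in6_addr *bigger;
        int err = 0;

        pthread_rwlock_wrlock(&f->lock);
        bigger = realloc(f->srcs, (f->count + 1) * sizeof(*bigger));
        if (bigger) {
                f->srcs = bigger;
                f->srcs[f->count++] = *a;
        } else {
                err = -1;
        }
        pthread_rwlock_unlock(&f->lock);
        return err;
}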