Diffstat (limited to 'net/ipv6/mcast.c')
-rw-r--r--  net/ipv6/mcast.c | 41
1 file changed, 31 insertions(+), 10 deletions(-)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 592eba61e78a..9648de2b6745 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1237,7 +1237,7 @@ static void mld_update_qri(struct inet6_dev *idev,
 }
 
 static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
-			  unsigned long *max_delay)
+			  unsigned long *max_delay, bool v1_query)
 {
 	unsigned long mldv1_md;
 
@@ -1245,11 +1245,32 @@ static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
 	if (mld_in_v2_mode_only(idev))
 		return -EINVAL;
 
-	/* MLDv1 router present */
 	mldv1_md = ntohs(mld->mld_maxdelay);
+
+	/* When in MLDv1 fallback and a MLDv2 router start-up being
+	 * unaware of current MLDv1 operation, the MRC == MRD mapping
+	 * only works when the exponential algorithm is not being
+	 * used (as MLDv1 is unaware of such things).
+	 *
+	 * According to the RFC author, the MLDv2 implementations
+	 * he's aware of all use a MRC < 32768 on start up queries.
+	 *
+	 * Thus, should we *ever* encounter something else larger
+	 * than that, just assume the maximum possible within our
+	 * reach.
+	 */
+	if (!v1_query)
+		mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
+
 	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
 
-	mld_set_v1_mode(idev);
+	/* MLDv1 router present: we need to go into v1 mode *only*
+	 * when an MLDv1 query is received as per section 9.12. of
+	 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
+	 * queries MUST be of exactly 24 octets.
+	 */
+	if (v1_query)
+		mld_set_v1_mode(idev);
 
 	/* cancel MLDv2 report timer */
 	mld_gq_stop_timer(idev);
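
The clamp above matters because the v1 path reads the 16-bit Maximum Response Code linearly, while RFC3810, 5.1.3 defines values of 32768 and above as a mantissa/exponent encoding that only the v2 path can decode. When an MLDv2 query has to be handled on the v1 path (v1_query == false), such a value is undecodable there, so it is simply capped. A small userspace sketch, illustrative only, with 32767 ms standing in for whatever MLDV1_MRD_MAX_COMPAT expands to:

#include <stdio.h>
#include <stdint.h>

/* RFC3810, 5.1.3: MRC values >= 32768 carry a mantissa/exponent encoding. */
static unsigned long mrc_decode_v2_ms(uint16_t mrc)
{
	if (mrc < 32768)
		return mrc;
	return ((mrc & 0x0fff) | 0x1000) << (((mrc >> 12) & 0x7) + 3);
}

int main(void)
{
	/* Assumed stand-in for MLDV1_MRD_MAX_COMPAT in the patch. */
	const unsigned int v1_cap_ms = 32767;
	const uint16_t samples[] = { 10000, 0x8000, 0xffff };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int mrc = samples[i];

		printf("MRC 0x%04x: v1 linear read %u ms, v2 decode %lu ms, clamped %u ms\n",
		       mrc, mrc, mrc_decode_v2_ms(mrc),
		       mrc < v1_cap_ms ? mrc : v1_cap_ms);
	}
	return 0;
}

For MRC 0xffff, for instance, the v2 decoding yields roughly 8.4 million ms while the v1 path would read 65535 ms; since the v1 code cannot apply the exponential decoding, the min() just assumes the largest value it can represent compatibly, as the comment in the hunk explains.
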
@@ -1264,10 +1285,6 @@ static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
 static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
 			  unsigned long *max_delay)
 {
-	/* hosts need to stay in MLDv1 mode, discard MLDv2 queries */
-	if (mld_in_v1_mode(idev))
-		return -EINVAL;
-
 	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
 
 	mld_update_qrv(idev, mld);
@@ -1324,8 +1341,11 @@ int igmp6_event_query(struct sk_buff *skb)
 	    !(group_type&IPV6_ADDR_MULTICAST))
 		return -EINVAL;
 
-	if (len == MLD_V1_QUERY_LEN) {
-		err = mld_process_v1(idev, mld, &max_delay);
+	if (len < MLD_V1_QUERY_LEN) {
+		return -EINVAL;
+	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
+		err = mld_process_v1(idev, mld, &max_delay,
+				     len == MLD_V1_QUERY_LEN);
 		if (err < 0)
 			return err;
 	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
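
The reworked dispatch leans on the fixed query sizes: RFC2710, 3.7 pins an MLDv1 query at exactly 24 octets and RFC3810, 8.1 requires at least 28 octets for MLDv2, so length alone distinguishes the versions, and while the host sits in v1 fallback even a longer v2 query is steered through the v1 path with v1_query == false; this is also why mld_process_v2() no longer needs its own in-v1-mode check. A minimal userspace sketch of that decision, with in_v1_fallback as a hypothetical stand-in for mld_in_v1_mode(idev):

#include <stdbool.h>
#include <stdio.h>

#define MLD_V1_QUERY_LEN	24	/* RFC2710, 3.7 */
#define MLD_V2_QUERY_LEN_MIN	28	/* RFC3810, 8.1 */

/* Hypothetical stand-in for mld_in_v1_mode(idev). */
static bool in_v1_fallback = true;

static const char *classify_query(int len)
{
	if (len < MLD_V1_QUERY_LEN)
		return "drop (-EINVAL)";
	else if (len == MLD_V1_QUERY_LEN || in_v1_fallback)
		return "v1 path (v1_query true only for len == 24)";
	else if (len >= MLD_V2_QUERY_LEN_MIN)
		return "v2 path";
	return "drop (-EINVAL)";	/* 25..27 octets outside v1 fallback */
}

int main(void)
{
	const int lens[] = { 20, 24, 26, 28, 36 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %2d -> %s\n", lens[i], classify_query(lens[i]));
	return 0;
}
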
@@ -1357,8 +1377,9 @@ int igmp6_event_query(struct sk_buff *skb)
 			mlh2 = (struct mld2_query *)skb_transport_header(skb);
 			mark = 1;
 		}
-	} else
+	} else {
 		return -EINVAL;
+	}
 
 	read_lock_bh(&idev->lock);
 	if (group_type == IPV6_ADDR_ANY) {