author		Eric Dumazet <eric.dumazet@gmail.com>	2010-11-12 00:46:50 -0500
committer	David S. Miller <davem@davemloft.net>	2010-11-12 16:18:57 -0500
commit		1d7138de878d1d4210727c1200193e69596f93b3 (patch)
tree		f7abb08bfdf35d1e876f93d24ce44072d2f6ed72 /net/ipv4/igmp.c
parent		f5539b5bfa2e00f2a6fd35731db66142a2f327c0 (diff)
igmp: RCU conversion of in_dev->mc_list
in_dev->mc_list is protected by one rwlock (in_dev->mc_list_lock).
This can easily be converted to RCU protection.
Writers hold RTNL, so mc_list_lock is removed rather than replaced by a
spinlock.
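
A minimal sketch of the resulting access pattern (an illustration only, not
code from the patch; "struct item", "head" and the helper names are
hypothetical stand-ins for struct ip_mc_list and in_dev->mc_list):

	#include <linux/rcupdate.h>
	#include <linux/rtnetlink.h>
	#include <linux/slab.h>

	struct item {
		struct item __rcu *next;	/* plays the role of next_rcu */
		struct rcu_head rcu;
	};

	static struct item __rcu *head;		/* plays the role of in_dev->mc_list */

	static void item_reclaim(struct rcu_head *h)
	{
		kfree(container_of(h, struct item, rcu));
	}

	/* Reader: rcu_read_lock() is enough, no mc_list_lock needed. */
	static void reader(void)
	{
		struct item *p;

		rcu_read_lock();
		for (p = rcu_dereference(head); p; p = rcu_dereference(p->next))
			;	/* inspect *p */
		rcu_read_unlock();
	}

	/* Writers: RTNL already serializes them, so no spinlock either. */
	static void writer_add(struct item *entry)
	{
		ASSERT_RTNL();
		entry->next = head;		/* not yet visible to readers */
		rcu_assign_pointer(head, entry);	/* publish */
	}

	static void writer_del(struct item *victim)
	{
		struct item __rcu **pp;
		struct item *p;

		ASSERT_RTNL();
		for (pp = &head; (p = rtnl_dereference(*pp)) != NULL; pp = &p->next) {
			if (p == victim) {
				*pp = p->next;			/* unlink */
				call_rcu(&p->rcu, item_reclaim);	/* free after a grace period */
				break;
			}
		}
	}

The same split shows up in the patch as the for_each_pmc_rcu() /
for_each_pmc_rtnl() iterators added below.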
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Cypher Wu <cypher.w@gmail.com>
Cc: Américo Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/igmp.c')
-rw-r--r--	net/ipv4/igmp.c	223
1 file changed, 104 insertions, 119 deletions
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 08d0d81ffc15..6f49d6c087da 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -149,11 +149,17 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc);
 static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 			 int sfcount, __be32 *psfsrc, int delta);
 
+
+static void ip_mc_list_reclaim(struct rcu_head *head)
+{
+	kfree(container_of(head, struct ip_mc_list, rcu));
+}
+
 static void ip_ma_put(struct ip_mc_list *im)
 {
 	if (atomic_dec_and_test(&im->refcnt)) {
 		in_dev_put(im->interface);
-		kfree(im);
+		call_rcu(&im->rcu, ip_mc_list_reclaim);
 	}
 }
 
@@ -163,7 +169,7 @@ static void ip_ma_put(struct ip_mc_list *im)
  *	Timer management
  */
 
-static __inline__ void igmp_stop_timer(struct ip_mc_list *im)
+static void igmp_stop_timer(struct ip_mc_list *im)
 {
 	spin_lock_bh(&im->lock);
 	if (del_timer(&im->timer))
@@ -496,14 +502,24 @@ empty_source:
 	return skb;
 }
 
+#define for_each_pmc_rcu(in_dev, pmc)	\
+	for (pmc = rcu_dereference(in_dev->mc_list);	\
+	     pmc != NULL;	\
+	     pmc = rcu_dereference(pmc->next_rcu))
+
+#define for_each_pmc_rtnl(in_dev, pmc)	\
+	for (pmc = rtnl_dereference(in_dev->mc_list);	\
+	     pmc != NULL;	\
+	     pmc = rtnl_dereference(pmc->next_rcu))
+
 static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
 {
 	struct sk_buff *skb = NULL;
 	int type;
 
 	if (!pmc) {
-		read_lock(&in_dev->mc_list_lock);
-		for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+		rcu_read_lock();
+		for_each_pmc_rcu(in_dev, pmc) {
 			if (pmc->multiaddr == IGMP_ALL_HOSTS)
 				continue;
 			spin_lock_bh(&pmc->lock);
@@ -514,7 +530,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
 			skb = add_grec(skb, pmc, type, 0, 0);
 			spin_unlock_bh(&pmc->lock);
 		}
-		read_unlock(&in_dev->mc_list_lock);
+		rcu_read_unlock();
 	} else {
 		spin_lock_bh(&pmc->lock);
 		if (pmc->sfcount[MCAST_EXCLUDE])
@@ -556,7 +572,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
 	struct sk_buff *skb = NULL;
 	int type, dtype;
 
-	read_lock(&in_dev->mc_list_lock);
+	rcu_read_lock();
 	spin_lock_bh(&in_dev->mc_tomb_lock);
 
 	/* deleted MCA's */
@@ -593,7 +609,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
 	spin_unlock_bh(&in_dev->mc_tomb_lock);
 
 	/* change recs */
-	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rcu(in_dev, pmc) {
 		spin_lock_bh(&pmc->lock);
 		if (pmc->sfcount[MCAST_EXCLUDE]) {
 			type = IGMPV3_BLOCK_OLD_SOURCES;
@@ -616,7 +632,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
 		}
 		spin_unlock_bh(&pmc->lock);
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 
 	if (!skb)
 		return;
@@ -813,14 +829,14 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
 	if (group == IGMP_ALL_HOSTS)
 		return;
 
-	read_lock(&in_dev->mc_list_lock);
-	for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, im) {
 		if (im->multiaddr == group) {
 			igmp_stop_timer(im);
 			break;
 		}
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 }
 
 static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
@@ -906,8 +922,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 	 * - Use the igmp->igmp_code field as the maximum
 	 *   delay possible
 	 */
-	read_lock(&in_dev->mc_list_lock);
-	for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, im) {
 		int changed;
 
 		if (group && group != im->multiaddr)
@@ -925,7 +941,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 		if (changed)
 			igmp_mod_timer(im, max_delay);
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 }
 
 /* called in rcu_read_lock() section */
@@ -1110,8 +1126,8 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
 		kfree(pmc);
 	}
 	/* clear dead sources, too */
-	read_lock(&in_dev->mc_list_lock);
-	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, pmc) {
 		struct ip_sf_list *psf, *psf_next;
 
 		spin_lock_bh(&pmc->lock);
@@ -1123,7 +1139,7 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
 			kfree(psf);
 		}
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 }
 #endif
 
@@ -1209,7 +1225,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 
 	ASSERT_RTNL();
 
-	for (im=in_dev->mc_list; im; im=im->next) {
+	for_each_pmc_rtnl(in_dev, im) {
 		if (im->multiaddr == addr) {
 			im->users++;
 			ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
@@ -1217,7 +1233,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 		}
 	}
 
-	im = kmalloc(sizeof(*im), GFP_KERNEL);
+	im = kzalloc(sizeof(*im), GFP_KERNEL);
 	if (!im)
 		goto out;
 
@@ -1227,26 +1243,18 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 	im->multiaddr = addr;
 	/* initial mode is (EX, empty) */
 	im->sfmode = MCAST_EXCLUDE;
-	im->sfcount[MCAST_INCLUDE] = 0;
 	im->sfcount[MCAST_EXCLUDE] = 1;
-	im->sources = NULL;
-	im->tomb = NULL;
-	im->crcount = 0;
 	atomic_set(&im->refcnt, 1);
 	spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
-	im->tm_running = 0;
 	setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
 	im->unsolicit_count = IGMP_Unsolicited_Report_Count;
-	im->reporter = 0;
-	im->gsquery = 0;
 #endif
-	im->loaded = 0;
-	write_lock_bh(&in_dev->mc_list_lock);
-	im->next = in_dev->mc_list;
-	in_dev->mc_list = im;
+
+	im->next_rcu = in_dev->mc_list;
 	in_dev->mc_count++;
-	write_unlock_bh(&in_dev->mc_list_lock);
+	rcu_assign_pointer(in_dev->mc_list, im);
+
 #ifdef CONFIG_IP_MULTICAST
 	igmpv3_del_delrec(in_dev, im->multiaddr);
 #endif
@@ -1287,17 +1295,18 @@ EXPORT_SYMBOL(ip_mc_rejoin_group);
 
 void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
 {
-	struct ip_mc_list *i, **ip;
+	struct ip_mc_list *i;
+	struct ip_mc_list __rcu **ip;
 
 	ASSERT_RTNL();
 
-	for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
+	for (ip = &in_dev->mc_list;
+	     (i = rtnl_dereference(*ip)) != NULL;
+	     ip = &i->next_rcu) {
 		if (i->multiaddr == addr) {
 			if (--i->users == 0) {
-				write_lock_bh(&in_dev->mc_list_lock);
-				*ip = i->next;
+				*ip = i->next_rcu;
 				in_dev->mc_count--;
-				write_unlock_bh(&in_dev->mc_list_lock);
 				igmp_group_dropped(i);
 
 				if (!in_dev->dead)
@@ -1316,34 +1325,34 @@ EXPORT_SYMBOL(ip_mc_dec_group);
 
 void ip_mc_unmap(struct in_device *in_dev)
 {
-	struct ip_mc_list *i;
+	struct ip_mc_list *pmc;
 
 	ASSERT_RTNL();
 
-	for (i = in_dev->mc_list; i; i = i->next)
-		igmp_group_dropped(i);
+	for_each_pmc_rtnl(in_dev, pmc)
+		igmp_group_dropped(pmc);
 }
 
 void ip_mc_remap(struct in_device *in_dev)
 {
-	struct ip_mc_list *i;
+	struct ip_mc_list *pmc;
 
 	ASSERT_RTNL();
 
-	for (i = in_dev->mc_list; i; i = i->next)
-		igmp_group_added(i);
+	for_each_pmc_rtnl(in_dev, pmc)
+		igmp_group_added(pmc);
 }
 
 /* Device going down */
 
 void ip_mc_down(struct in_device *in_dev)
 {
-	struct ip_mc_list *i;
+	struct ip_mc_list *pmc;
 
 	ASSERT_RTNL();
 
-	for (i=in_dev->mc_list; i; i=i->next)
-		igmp_group_dropped(i);
+	for_each_pmc_rtnl(in_dev, pmc)
+		igmp_group_dropped(pmc);
 
 #ifdef CONFIG_IP_MULTICAST
 	in_dev->mr_ifc_count = 0;
@@ -1374,7 +1383,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
 	in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
 #endif
 
-	rwlock_init(&in_dev->mc_list_lock);
 	spin_lock_init(&in_dev->mc_tomb_lock);
 }
 
@@ -1382,14 +1390,14 @@ void ip_mc_init_dev(struct in_device *in_dev)
 
 void ip_mc_up(struct in_device *in_dev)
 {
-	struct ip_mc_list *i;
+	struct ip_mc_list *pmc;
 
 	ASSERT_RTNL();
 
 	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
-	for (i=in_dev->mc_list; i; i=i->next)
-		igmp_group_added(i);
+	for_each_pmc_rtnl(in_dev, pmc)
+		igmp_group_added(pmc);
 }
 
 /*
@@ -1405,17 +1413,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 	/* Deactivate timers */
 	ip_mc_down(in_dev);
 
-	write_lock_bh(&in_dev->mc_list_lock);
-	while ((i = in_dev->mc_list) != NULL) {
-		in_dev->mc_list = i->next;
+	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
+		in_dev->mc_list = i->next_rcu;
 		in_dev->mc_count--;
-		write_unlock_bh(&in_dev->mc_list_lock);
+
 		igmp_group_dropped(i);
 		ip_ma_put(i);
-
-		write_lock_bh(&in_dev->mc_list_lock);
 	}
-	write_unlock_bh(&in_dev->mc_list_lock);
 }
 
 /* RTNL is locked */
@@ -1513,18 +1517,18 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 	if (!in_dev)
 		return -ENODEV;
-	read_lock(&in_dev->mc_list_lock);
-	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, pmc) {
 		if (*pmca == pmc->multiaddr)
 			break;
 	}
 	if (!pmc) {
 		/* MCA not found?? bug */
-		read_unlock(&in_dev->mc_list_lock);
+		rcu_read_unlock();
 		return -ESRCH;
 	}
 	spin_lock_bh(&pmc->lock);
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 #ifdef CONFIG_IP_MULTICAST
 	sf_markstate(pmc);
 #endif
@@ -1685,18 +1689,18 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 	if (!in_dev)
 		return -ENODEV;
-	read_lock(&in_dev->mc_list_lock);
-	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, pmc) {
 		if (*pmca == pmc->multiaddr)
 			break;
 	}
 	if (!pmc) {
 		/* MCA not found?? bug */
-		read_unlock(&in_dev->mc_list_lock);
+		rcu_read_unlock();
 		return -ESRCH;
 	}
 	spin_lock_bh(&pmc->lock);
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 
 #ifdef CONFIG_IP_MULTICAST
 	sf_markstate(pmc);
@@ -1793,7 +1797,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 
 	err = -EADDRINUSE;
 	ifindex = imr->imr_ifindex;
-	for (i = inet->mc_list; i; i = i->next) {
+	for_each_pmc_rtnl(inet, i) {
 		if (i->multi.imr_multiaddr.s_addr == addr &&
 		    i->multi.imr_ifindex == ifindex)
 			goto done;
@@ -1807,7 +1811,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 		goto done;
 
 	memcpy(&iml->multi, imr, sizeof(*imr));
-	iml->next = inet->mc_list;
+	iml->next_rcu = inet->mc_list;
 	iml->sflist = NULL;
 	iml->sfmode = MCAST_EXCLUDE;
 	rcu_assign_pointer(inet->mc_list, iml);
@@ -1821,17 +1825,14 @@ EXPORT_SYMBOL(ip_mc_join_group);
 
 static void ip_sf_socklist_reclaim(struct rcu_head *rp)
 {
-	struct ip_sf_socklist *psf;
-
-	psf = container_of(rp, struct ip_sf_socklist, rcu);
+	kfree(container_of(rp, struct ip_sf_socklist, rcu));
 	/* sk_omem_alloc should have been decreased by the caller*/
-	kfree(psf);
 }
 
 static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 			   struct in_device *in_dev)
 {
-	struct ip_sf_socklist *psf = iml->sflist;
+	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
 	int err;
 
 	if (psf == NULL) {
@@ -1851,11 +1852,8 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 
 static void ip_mc_socklist_reclaim(struct rcu_head *rp)
 {
-	struct ip_mc_socklist *iml;
-
-	iml = container_of(rp, struct ip_mc_socklist, rcu);
+	kfree(container_of(rp, struct ip_mc_socklist, rcu));
 	/* sk_omem_alloc should have been decreased by the caller*/
-	kfree(iml);
 }
 
 
@@ -1866,7 +1864,8 @@ static void ip_mc_socklist_reclaim(struct rcu_head *rp)
 int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct ip_mc_socklist *iml, **imlp;
+	struct ip_mc_socklist *iml;
+	struct ip_mc_socklist __rcu **imlp;
 	struct in_device *in_dev;
 	struct net *net = sock_net(sk);
 	__be32 group = imr->imr_multiaddr.s_addr;
@@ -1876,7 +1875,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 	rtnl_lock();
 	in_dev = ip_mc_find_dev(net, imr);
 	ifindex = imr->imr_ifindex;
-	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
+	for (imlp = &inet->mc_list;
+	     (iml = rtnl_dereference(*imlp)) != NULL;
+	     imlp = &iml->next_rcu) {
 		if (iml->multi.imr_multiaddr.s_addr != group)
 			continue;
 		if (ifindex) {
@@ -1888,7 +1889,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
 		(void) ip_mc_leave_src(sk, iml, in_dev);
 
-		rcu_assign_pointer(*imlp, iml->next);
+		*imlp = iml->next_rcu;
 
 		if (in_dev)
 			ip_mc_dec_group(in_dev, group);
@@ -1934,7 +1935,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	}
 	err = -EADDRNOTAVAIL;
 
-	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rtnl(inet, pmc) {
 		if ((pmc->multi.imr_multiaddr.s_addr ==
 		     imr.imr_multiaddr.s_addr) &&
 		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
@@ -1958,7 +1959,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 		pmc->sfmode = omode;
 	}
 
-	psl = pmc->sflist;
+	psl = rtnl_dereference(pmc->sflist);
 	if (!add) {
 		if (!psl)
 			goto done;	/* err = -EADDRNOTAVAIL */
@@ -2077,7 +2078,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 		goto done;
 	}
 
-	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rtnl(inet, pmc) {
 		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
 		    pmc->multi.imr_ifindex == imr.imr_ifindex)
 			break;
@@ -2107,7 +2108,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 		(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
 				     msf->imsf_fmode, 0, NULL, 0);
 	}
-	psl = pmc->sflist;
+	psl = rtnl_dereference(pmc->sflist);
 	if (psl) {
 		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
 				     psl->sl_count, psl->sl_addr, 0);
@@ -2155,7 +2156,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
 	}
 	err = -EADDRNOTAVAIL;
 
-	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rtnl(inet, pmc) {
 		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
 		    pmc->multi.imr_ifindex == imr.imr_ifindex)
 			break;
@@ -2163,7 +2164,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
 	if (!pmc)		/* must have a prior join */
 		goto done;
 	msf->imsf_fmode = pmc->sfmode;
-	psl = pmc->sflist;
+	psl = rtnl_dereference(pmc->sflist);
 	rtnl_unlock();
 	if (!psl) {
 		len = 0;
@@ -2208,7 +2209,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
 
 	err = -EADDRNOTAVAIL;
 
-	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rtnl(inet, pmc) {
 		if (pmc->multi.imr_multiaddr.s_addr == addr &&
 		    pmc->multi.imr_ifindex == gsf->gf_interface)
 			break;
@@ -2216,7 +2217,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
 	if (!pmc)		/* must have a prior join */
 		goto done;
 	gsf->gf_fmode = pmc->sfmode;
-	psl = pmc->sflist;
+	psl = rtnl_dereference(pmc->sflist);
 	rtnl_unlock();
 	count = psl ? psl->sl_count : 0;
 	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
@@ -2257,7 +2258,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
 		goto out;
 
 	rcu_read_lock();
-	for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
+	for_each_pmc_rcu(inet, pmc) {
 		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
 		    pmc->multi.imr_ifindex == dif)
 			break;
@@ -2265,7 +2266,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
 	ret = inet->mc_all;
 	if (!pmc)
 		goto unlock;
-	psl = pmc->sflist;
+	psl = rcu_dereference(pmc->sflist);
 	ret = (pmc->sfmode == MCAST_EXCLUDE);
 	if (!psl)
 		goto unlock;
@@ -2300,10 +2301,10 @@ void ip_mc_drop_socket(struct sock *sk)
 		return;
 
 	rtnl_lock();
-	while ((iml = inet->mc_list) != NULL) {
+	while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
 		struct in_device *in_dev;
-		rcu_assign_pointer(inet->mc_list, iml->next);
 
+		inet->mc_list = iml->next_rcu;
 		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
 		(void) ip_mc_leave_src(sk, iml, in_dev);
 		if (in_dev != NULL) {
@@ -2323,8 +2324,8 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
 	struct ip_sf_list *psf;
 	int rv = 0;
 
-	read_lock(&in_dev->mc_list_lock);
-	for (im=in_dev->mc_list; im; im=im->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, im) {
 		if (im->multiaddr == mc_addr)
 			break;
 	}
@@ -2345,7 +2346,7 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
 		} else
 			rv = 1; /* unspecified source; tentatively allow */
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 	return rv;
 }
 
@@ -2371,13 +2372,11 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
 		in_dev = __in_dev_get_rcu(state->dev);
 		if (!in_dev)
 			continue;
-		read_lock(&in_dev->mc_list_lock);
-		im = in_dev->mc_list;
+		im = rcu_dereference(in_dev->mc_list);
 		if (im) {
 			state->in_dev = in_dev;
 			break;
 		}
-		read_unlock(&in_dev->mc_list_lock);
 	}
 	return im;
 }
@@ -2385,11 +2384,9 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
 static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
 {
 	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
-	im = im->next;
-	while (!im) {
-		if (likely(state->in_dev != NULL))
-			read_unlock(&state->in_dev->mc_list_lock);
 
+	im = rcu_dereference(im->next_rcu);
+	while (!im) {
 		state->dev = next_net_device_rcu(state->dev);
 		if (!state->dev) {
 			state->in_dev = NULL;
@@ -2398,8 +2395,7 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
 		state->in_dev = __in_dev_get_rcu(state->dev);
 		if (!state->in_dev)
 			continue;
-		read_lock(&state->in_dev->mc_list_lock);
-		im = state->in_dev->mc_list;
+		im = rcu_dereference(state->in_dev->mc_list);
 	}
 	return im;
 }
@@ -2435,10 +2431,8 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
 	__releases(rcu)
 {
 	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
-	if (likely(state->in_dev != NULL)) {
-		read_unlock(&state->in_dev->mc_list_lock);
-		state->in_dev = NULL;
-	}
+
+	state->in_dev = NULL;
 	state->dev = NULL;
 	rcu_read_unlock();
 }
@@ -2460,7 +2454,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
 		querier = "NONE";
 #endif
 
-	if (state->in_dev->mc_list == im) {
+	if (rcu_dereference(state->in_dev->mc_list) == im) {
 		seq_printf(seq, "%d\t%-10s: %5d %7s\n",
 			   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
 	}
@@ -2519,8 +2513,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
 		idev = __in_dev_get_rcu(state->dev);
 		if (unlikely(idev == NULL))
 			continue;
-		read_lock(&idev->mc_list_lock);
-		im = idev->mc_list;
+		im = rcu_dereference(idev->mc_list);
 		if (likely(im != NULL)) {
 			spin_lock_bh(&im->lock);
 			psf = im->sources;
@@ -2531,7 +2524,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
 			}
 			spin_unlock_bh(&im->lock);
 		}
-		read_unlock(&idev->mc_list_lock);
 	}
 	return psf;
 }
@@ -2545,9 +2537,6 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
 		spin_unlock_bh(&state->im->lock);
 		state->im = state->im->next;
 		while (!state->im) {
-			if (likely(state->idev != NULL))
-				read_unlock(&state->idev->mc_list_lock);
-
 			state->dev = next_net_device_rcu(state->dev);
 			if (!state->dev) {
 				state->idev = NULL;
@@ -2556,8 +2545,7 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
 			state->idev = __in_dev_get_rcu(state->dev);
 			if (!state->idev)
 				continue;
-			read_lock(&state->idev->mc_list_lock);
-			state->im = state->idev->mc_list;
+			state->im = rcu_dereference(state->idev->mc_list);
 		}
 		if (!state->im)
 			break;
@@ -2603,10 +2591,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
 		spin_unlock_bh(&state->im->lock);
 		state->im = NULL;
 	}
-	if (likely(state->idev != NULL)) {
-		read_unlock(&state->idev->mc_list_lock);
-		state->idev = NULL;
-	}
+	state->idev = NULL;
 	state->dev = NULL;
 	rcu_read_unlock();
 }