author	Sowmini Varadhan <sowmini.varadhan@oracle.com>	2018-02-03 07:26:51 -0500
committer	David S. Miller <davem@davemloft.net>	2018-02-08 15:23:52 -0500
commit	ebeeb1ad9b8adcc37c2ec21a96f39e9d35199b46 (patch)
tree	25c1f3a044ab1c8f8e15031f606b00ae35db2ed0
parent	79a8a642bf05cd0dced20621f6fef9d884124abd (diff)
rds: tcp: use rds_destroy_pending() to synchronize netns/module teardown and rds connection/workq management
An rds_connection can get added during netns deletion between lines 528 and 529 of

  506 static void rds_tcp_kill_sock(struct net *net)
  :
  /* code to pull out all the rds_connections that should be destroyed */
  :
  528         spin_unlock_irq(&rds_tcp_conn_lock);
  529         list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
  530                 rds_conn_destroy(tc->t_cpath->cp_conn);

Such an rds_connection would miss the rds_conn_destroy() loop (which cancels all pending work) and, if work was scheduled after netns deletion, could trigger a use-after-free. A similar race window exists for the module-unload path in rds_tcp_exit -> rds_tcp_destroy_conns.

Concurrency with netns deletion (rds_tcp_kill_sock()) must be handled by checking check_net() before enqueuing new work or adding new connections. Concurrency with module unload is handled by maintaining a module-specific flag that is set at the start of the module exit function and must be checked before enqueuing new work or adding new connections.

This commit refactors the existing RDS_DESTROY_PENDING checks added by commit 3db6e0d172c9 ("rds: use RCU to synchronize work-enqueue with connection teardown") and consolidates all the concurrency checks listed above into the function rds_destroy_pending().

Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
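For reference, the consolidated check reduces to the pattern below. This is a condensed sketch drawn from the hunks that follow (the new rds.h helper plus a representative work-enqueue call site), not a standalone compilation unit, since it relies on RDS-internal types and the rds_wq workqueue:

  /* net/rds/rds.h: teardown is pending if the netns is being torn down
   * (check_net() fails) or the transport module is unloading.
   */
  static inline bool rds_destroy_pending(struct rds_connection *conn)
  {
          return !check_net(rds_conn_net(conn)) ||
                  (conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
  }

  /* Typical call site: work is enqueued only under the RCU read lock and
   * only if no teardown is pending, so the exit paths can flush it with
   * synchronize_rcu() followed by cancel_delayed_work_sync().
   */
  rcu_read_lock();
  if (!rds_destroy_pending(cp->cp_conn))
          queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
  rcu_read_unlock();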
-rw-r--r--	net/rds/cong.c	2
-rw-r--r--	net/rds/connection.c	15
-rw-r--r--	net/rds/ib.c	17
-rw-r--r--	net/rds/ib_cm.c	1
-rw-r--r--	net/rds/rds.h	7
-rw-r--r--	net/rds/send.c	10
-rw-r--r--	net/rds/tcp.c	42
-rw-r--r--	net/rds/tcp_connect.c	2
-rw-r--r--	net/rds/tcp_recv.c	2
-rw-r--r--	net/rds/tcp_send.c	2
-rw-r--r--	net/rds/threads.c	6
11 files changed, 76 insertions, 30 deletions
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 8d19fd25dce3..63da9d2f142d 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -223,7 +223,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
 
 	rcu_read_lock();
 	if (!test_and_set_bit(0, &conn->c_map_queued) &&
-	    !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+	    !rds_destroy_pending(cp->cp_conn)) {
 		rds_stats_inc(s_cong_update_queued);
 		/* We cannot inline the call to rds_send_xmit() here
 		 * for two reasons (both pertaining to a TCP transport):
diff --git a/net/rds/connection.c b/net/rds/connection.c
index b10c0ef36d8d..94e190febfdd 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -220,8 +220,13 @@ static struct rds_connection *__rds_conn_create(struct net *net,
 					     is_outgoing);
 		conn->c_path[i].cp_index = i;
 	}
-	ret = trans->conn_alloc(conn, gfp);
+	rcu_read_lock();
+	if (rds_destroy_pending(conn))
+		ret = -ENETDOWN;
+	else
+		ret = trans->conn_alloc(conn, gfp);
 	if (ret) {
+		rcu_read_unlock();
 		kfree(conn->c_path);
 		kmem_cache_free(rds_conn_slab, conn);
 		conn = ERR_PTR(ret);
@@ -283,6 +288,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
 		}
 	}
 	spin_unlock_irqrestore(&rds_conn_lock, flags);
+	rcu_read_unlock();
 
 out:
 	return conn;
@@ -382,13 +388,10 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
 {
 	struct rds_message *rm, *rtmp;
 
-	set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
-
 	if (!cp->cp_transport_data)
 		return;
 
 	/* make sure lingering queued work won't try to ref the conn */
-	synchronize_rcu();
 	cancel_delayed_work_sync(&cp->cp_send_w);
 	cancel_delayed_work_sync(&cp->cp_recv_w);
 
@@ -691,7 +694,7 @@ void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
 	atomic_set(&cp->cp_state, RDS_CONN_ERROR);
 
 	rcu_read_lock();
-	if (!destroy && test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+	if (!destroy && rds_destroy_pending(cp->cp_conn)) {
 		rcu_read_unlock();
 		return;
 	}
@@ -714,7 +717,7 @@ EXPORT_SYMBOL_GPL(rds_conn_drop);
 void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
 {
 	rcu_read_lock();
-	if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+	if (rds_destroy_pending(cp->cp_conn)) {
 		rcu_read_unlock();
 		return;
 	}
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ff0c98096af1..50a88f3e7e39 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -48,6 +48,7 @@
 static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
 static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
 unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
+static atomic_t rds_ib_unloading;
 
 module_param(rds_ib_mr_1m_pool_size, int, 0444);
 MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA");
@@ -378,8 +379,23 @@ static void rds_ib_unregister_client(void)
 	flush_workqueue(rds_wq);
 }
 
+static void rds_ib_set_unloading(void)
+{
+	atomic_set(&rds_ib_unloading, 1);
+}
+
+static bool rds_ib_is_unloading(struct rds_connection *conn)
+{
+	struct rds_conn_path *cp = &conn->c_path[0];
+
+	return (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags) ||
+		atomic_read(&rds_ib_unloading) != 0);
+}
+
 void rds_ib_exit(void)
 {
+	rds_ib_set_unloading();
+	synchronize_rcu();
 	rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
 	rds_ib_unregister_client();
 	rds_ib_destroy_nodev_conns();
@@ -413,6 +429,7 @@ struct rds_transport rds_ib_transport = {
 	.flush_mrs = rds_ib_flush_mrs,
 	.t_owner = THIS_MODULE,
 	.t_name = "infiniband",
+	.t_unloading = rds_ib_is_unloading,
 	.t_type = RDS_TRANS_IB
 };
 
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 80fb6f63e768..eea1d8611b20 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -117,6 +117,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 			  &conn->c_laddr, &conn->c_faddr,
 			  RDS_PROTOCOL_MAJOR(conn->c_version),
 			  RDS_PROTOCOL_MINOR(conn->c_version));
+		set_bit(RDS_DESTROY_PENDING, &conn->c_path[0].cp_flags);
 		rds_conn_destroy(conn);
 		return;
 	} else {
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 374ae83b60d4..7301b9b01890 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -518,6 +518,7 @@ struct rds_transport {
 	void (*sync_mr)(void *trans_private, int direction);
 	void (*free_mr)(void *trans_private, int invalidate);
 	void (*flush_mrs)(void);
+	bool (*t_unloading)(struct rds_connection *conn);
 };
 
 struct rds_sock {
@@ -862,6 +863,12 @@ static inline void rds_mr_put(struct rds_mr *mr)
 		__rds_put_mr_final(mr);
 }
 
+static inline bool rds_destroy_pending(struct rds_connection *conn)
+{
+	return !check_net(rds_conn_net(conn)) ||
+		(conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
+}
+
 /* stats.c */
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
 #define rds_stats_inc_which(which, member) do {		\
diff --git a/net/rds/send.c b/net/rds/send.c
index d3e32d1f3c7d..b1b0022b8370 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -162,7 +162,7 @@ restart:
 		goto out;
 	}
 
-	if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+	if (rds_destroy_pending(cp->cp_conn)) {
 		release_in_xmit(cp);
 		ret = -ENETUNREACH; /* dont requeue send work */
 		goto out;
@@ -444,7 +444,7 @@ over_batch:
 		if (batch_count < send_batch_count)
 			goto restart;
 		rcu_read_lock();
-		if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+		if (rds_destroy_pending(cp->cp_conn))
 			ret = -ENETUNREACH;
 		else
 			queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
@@ -1162,7 +1162,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	else
 		cpath = &conn->c_path[0];
 
-	if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) {
+	if (rds_destroy_pending(conn)) {
 		ret = -EAGAIN;
 		goto out;
 	}
@@ -1209,7 +1209,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	if (ret == -ENOMEM || ret == -EAGAIN) {
 		ret = 0;
 		rcu_read_lock();
-		if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags))
+		if (rds_destroy_pending(cpath->cp_conn))
 			ret = -ENETUNREACH;
 		else
 			queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
@@ -1295,7 +1295,7 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport,
 
 	/* schedule the send work on rds_wq */
 	rcu_read_lock();
-	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+	if (!rds_destroy_pending(cp->cp_conn))
 		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
 	rcu_read_unlock();
 
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 9920d2f84eff..44c4652721af 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -49,6 +49,7 @@ static unsigned int rds_tcp_tc_count;
 /* Track rds_tcp_connection structs so they can be cleaned up */
 static DEFINE_SPINLOCK(rds_tcp_conn_lock);
 static LIST_HEAD(rds_tcp_conn_list);
+static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
 
 static struct kmem_cache *rds_tcp_conn_slab;
 
@@ -274,14 +275,13 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
 static void rds_tcp_conn_free(void *arg)
 {
 	struct rds_tcp_connection *tc = arg;
-	unsigned long flags;
 
 	rdsdebug("freeing tc %p\n", tc);
 
-	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+	spin_lock_bh(&rds_tcp_conn_lock);
 	if (!tc->t_tcp_node_detached)
 		list_del(&tc->t_tcp_node);
-	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+	spin_unlock_bh(&rds_tcp_conn_lock);
 
 	kmem_cache_free(rds_tcp_conn_slab, tc);
 }
@@ -296,7 +296,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 		tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
 		if (!tc) {
 			ret = -ENOMEM;
-			break;
+			goto fail;
 		}
 		mutex_init(&tc->t_conn_path_lock);
 		tc->t_sock = NULL;
@@ -306,14 +306,19 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 
 		conn->c_path[i].cp_transport_data = tc;
 		tc->t_cpath = &conn->c_path[i];
+		tc->t_tcp_node_detached = true;
 
-		spin_lock_irq(&rds_tcp_conn_lock);
-		tc->t_tcp_node_detached = false;
-		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
-		spin_unlock_irq(&rds_tcp_conn_lock);
 		rdsdebug("rds_conn_path [%d] tc %p\n", i,
 			 conn->c_path[i].cp_transport_data);
 	}
+	spin_lock_bh(&rds_tcp_conn_lock);
+	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
+		tc = conn->c_path[i].cp_transport_data;
+		tc->t_tcp_node_detached = false;
+		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
+	}
+	spin_unlock_bh(&rds_tcp_conn_lock);
+fail:
 	if (ret) {
 		for (j = 0; j < i; j++)
 			rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
@@ -332,6 +337,16 @@ static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
 	return false;
 }
 
+static void rds_tcp_set_unloading(void)
+{
+	atomic_set(&rds_tcp_unloading, 1);
+}
+
+static bool rds_tcp_is_unloading(struct rds_connection *conn)
+{
+	return atomic_read(&rds_tcp_unloading) != 0;
+}
+
 static void rds_tcp_destroy_conns(void)
 {
 	struct rds_tcp_connection *tc, *_tc;
@@ -370,6 +385,7 @@ struct rds_transport rds_tcp_transport = {
 	.t_type = RDS_TRANS_TCP,
 	.t_prefer_loopback = 1,
 	.t_mp_capable = 1,
+	.t_unloading = rds_tcp_is_unloading,
 };
 
 static unsigned int rds_tcp_netid;
@@ -513,7 +529,7 @@ static void rds_tcp_kill_sock(struct net *net)
 
 	rtn->rds_tcp_listen_sock = NULL;
 	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
-	spin_lock_irq(&rds_tcp_conn_lock);
+	spin_lock_bh(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
@@ -526,7 +542,7 @@ static void rds_tcp_kill_sock(struct net *net)
 			tc->t_tcp_node_detached = true;
 		}
 	}
-	spin_unlock_irq(&rds_tcp_conn_lock);
+	spin_unlock_bh(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
 		rds_conn_destroy(tc->t_cpath->cp_conn);
 }
@@ -574,7 +590,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 {
 	struct rds_tcp_connection *tc, *_tc;
 
-	spin_lock_irq(&rds_tcp_conn_lock);
+	spin_lock_bh(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
@@ -584,7 +600,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 		/* reconnect with new parameters */
 		rds_conn_path_drop(tc->t_cpath, false);
 	}
-	spin_unlock_irq(&rds_tcp_conn_lock);
+	spin_unlock_bh(&rds_tcp_conn_lock);
 }
 
 static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
@@ -607,6 +623,8 @@ static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
 
 static void rds_tcp_exit(void)
 {
+	rds_tcp_set_unloading();
+	synchronize_rcu();
 	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
 	unregister_pernet_subsys(&rds_tcp_net_ops);
 	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 534c67aeb20f..d999e7075645 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -170,7 +170,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
 		 cp->cp_conn, tc, sock);
 
 	if (sock) {
-		if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+		if (rds_destroy_pending(cp->cp_conn))
 			rds_tcp_set_linger(sock);
 		sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
 		lock_sock(sock->sk);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index dd707b9e73e5..b9fbd2ee74ef 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -323,7 +323,7 @@ void rds_tcp_data_ready(struct sock *sk)
 
 	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
 		rcu_read_lock();
-		if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+		if (!rds_destroy_pending(cp->cp_conn))
 			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
 		rcu_read_unlock();
 	}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 16f65744d984..7df869d37afd 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -204,7 +204,7 @@ void rds_tcp_write_space(struct sock *sk)
 
 	rcu_read_lock();
 	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
-	    !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+	    !rds_destroy_pending(cp->cp_conn))
 		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 	rcu_read_unlock();
 
diff --git a/net/rds/threads.c b/net/rds/threads.c
index eb76db1360b0..c52861d77a59 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -88,7 +88,7 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
 	cp->cp_reconnect_jiffies = 0;
 	set_bit(0, &cp->cp_conn->c_map_queued);
 	rcu_read_lock();
-	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+	if (!rds_destroy_pending(cp->cp_conn)) {
 		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 		queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
 	}
@@ -138,7 +138,7 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
 	if (cp->cp_reconnect_jiffies == 0) {
 		cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
 		rcu_read_lock();
-		if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+		if (!rds_destroy_pending(cp->cp_conn))
 			queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
 		rcu_read_unlock();
 		return;
@@ -149,7 +149,7 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
 		 rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
 		 conn, &conn->c_laddr, &conn->c_faddr);
 	rcu_read_lock();
-	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+	if (!rds_destroy_pending(cp->cp_conn))
 		queue_delayed_work(rds_wq, &cp->cp_conn_w,
 				   rand % cp->cp_reconnect_jiffies);
 	rcu_read_unlock();