author		Sowmini Varadhan <sowmini.varadhan@oracle.com>	2018-03-15 06:54:26 -0400
committer	David S. Miller <davem@davemloft.net>	2018-03-17 17:18:54 -0400
commit		53d0e83f9329aa51dcc205b514dbee05cb4df309 (patch)
tree		71b0c06bfe39f8f46320853cd8a6c7944354a34f /net/rds
parent		3008ba5faa7a63bac2227c56b533c08fa6d54a0b (diff)
rds: tcp: must use spin_lock_irq* and not spin_lock_bh with rds_tcp_conn_lock
rds_tcp_connection allocation/free management has the potential to be
called from __rds_conn_create after IRQs have been disabled, so
spin_[un]lock_bh cannot be used with rds_tcp_conn_lock.
Bottom-halves that need to synchronize for critical sections protected
by rds_tcp_conn_lock should instead use rds_destroy_pending() correctly.
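
A minimal sketch of the hazard, with a stand-in lock (demo_lock and both function names below are illustrative, not from this patch): spin_unlock_bh() ends in local_bh_enable(), which may run pending softirqs and can warn when hardirqs are off, whereas the irqsave/irqrestore pair is safe in any context:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* stand-in lock, not from this patch */

/* Broken if reached with IRQs disabled: spin_unlock_bh() calls
 * local_bh_enable(), which may run pending softirqs and (with irqflags
 * tracing) warns when hardirqs are off.
 */
static void demo_bh_locking(void)
{
	spin_lock_bh(&demo_lock);
	/* ... critical section ... */
	spin_unlock_bh(&demo_lock);
}

/* Safe in any context: save the caller's IRQ state, restore it on unlock. */
static void demo_irqsave_locking(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

This is also why the patch below uses the irqsave variant only in rds_tcp_conn_free(), which can be reached with IRQs already disabled, while the remaining sites run in process context with IRQs on and get the cheaper spin_lock_irq()/spin_unlock_irq().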
Reported-by: syzbot+c68e51bb5e699d3f8d91@syzkaller.appspotmail.com
Fixes: ebeeb1ad9b8a ("rds: tcp: use rds_destroy_pending() to synchronize netns/module teardown and rds connection/workq management")
Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
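
The second half of the message points bottom halves at rds_destroy_pending(); a hedged sketch of that pattern, modeled on the callers added by ebeeb1ad9b8a (the helper name here is made up; cp_send_w and rds_wq are used as they appear elsewhere in net/rds):

/* Hypothetical caller: bottom-half/workqueue paths check for pending
 * netns/module teardown under RCU instead of taking rds_tcp_conn_lock.
 */
static void demo_queue_send_work(struct rds_conn_path *cp)
{
	rcu_read_lock();
	if (!rds_destroy_pending(cp->cp_conn))
		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
	rcu_read_unlock();
}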
Diffstat (limited to 'net/rds')
 net/rds/tcp.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index eb04e7fa2467..08ea9cd5c2f6 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -272,13 +272,14 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
 static void rds_tcp_conn_free(void *arg)
 {
 	struct rds_tcp_connection *tc = arg;
+	unsigned long flags;
 
 	rdsdebug("freeing tc %p\n", tc);
 
-	spin_lock_bh(&rds_tcp_conn_lock);
+	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
 	if (!tc->t_tcp_node_detached)
 		list_del(&tc->t_tcp_node);
-	spin_unlock_bh(&rds_tcp_conn_lock);
+	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
 
 	kmem_cache_free(rds_tcp_conn_slab, tc);
 }
@@ -308,13 +309,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 		rdsdebug("rds_conn_path [%d] tc %p\n", i,
 			 conn->c_path[i].cp_transport_data);
 	}
-	spin_lock_bh(&rds_tcp_conn_lock);
+	spin_lock_irq(&rds_tcp_conn_lock);
 	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
 		tc = conn->c_path[i].cp_transport_data;
 		tc->t_tcp_node_detached = false;
 		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
 	}
-	spin_unlock_bh(&rds_tcp_conn_lock);
+	spin_unlock_irq(&rds_tcp_conn_lock);
 fail:
 	if (ret) {
 		for (j = 0; j < i; j++)
@@ -527,7 +528,7 @@ static void rds_tcp_kill_sock(struct net *net)
 
 	rtn->rds_tcp_listen_sock = NULL;
 	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
-	spin_lock_bh(&rds_tcp_conn_lock);
+	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
@@ -540,7 +541,7 @@ static void rds_tcp_kill_sock(struct net *net)
 			tc->t_tcp_node_detached = true;
 		}
 	}
-	spin_unlock_bh(&rds_tcp_conn_lock);
+	spin_unlock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
 		rds_conn_destroy(tc->t_cpath->cp_conn);
 }
@@ -588,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 {
 	struct rds_tcp_connection *tc, *_tc;
 
-	spin_lock_bh(&rds_tcp_conn_lock);
+	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
@@ -598,7 +599,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 		/* reconnect with new parameters */
 		rds_conn_path_drop(tc->t_cpath, false);
 	}
-	spin_unlock_bh(&rds_tcp_conn_lock);
+	spin_unlock_irq(&rds_tcp_conn_lock);
 }
 
 static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,