summaryrefslogtreecommitdiffstats
path: root/net/rxrpc
diff options
context:
space:
mode:
authorDavid Howells <dhowells@redhat.com>2019-07-30 09:42:50 -0400
committerDavid Howells <dhowells@redhat.com>2019-07-30 09:42:50 -0400
commit60034d3d146b11922ab1db613bce062dddc0327a (patch)
tree1587f2248bb2b5a0f6a2c1c6f91dda6ad88720b4 /net/rxrpc
parenta20961cc9493be46b5c4f565b925284a90c7864c (diff)
rxrpc: Fix potential deadlock
There is a potential deadlock in rxrpc_peer_keepalive_dispatch() whereby
rxrpc_put_peer() is called with the peer_hash_lock held, but if it reduces
the peer's refcount to 0, rxrpc_put_peer() calls __rxrpc_put_peer() -
which then tries to take the already-held lock.

Fix this by providing a version of rxrpc_put_peer() that can be called in
situations where the lock is already held.

The bug may produce the following lockdep report:

============================================
WARNING: possible recursive locking detected
5.2.0-next-20190718 #41 Not tainted
--------------------------------------------
kworker/0:3/21678 is trying to acquire lock:
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: spin_lock_bh /./include/linux/spinlock.h:343 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: __rxrpc_put_peer /net/rxrpc/peer_object.c:415 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: rxrpc_put_peer+0x2d3/0x6a0 /net/rxrpc/peer_object.c:435

but task is already holding lock:
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: spin_lock_bh /./include/linux/spinlock.h:343 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: rxrpc_peer_keepalive_dispatch /net/rxrpc/peer_event.c:378 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: rxrpc_peer_keepalive_worker+0x6b3/0xd02 /net/rxrpc/peer_event.c:430

Fixes: 330bdcfadcee ("rxrpc: Fix the keepalive generator [ver #2]")
Reported-by: syzbot+72af434e4b3417318f84@syzkaller.appspotmail.com
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
Diffstat (limited to 'net/rxrpc')
-rw-r--r--  net/rxrpc/ar-internal.h |  1 +
-rw-r--r--  net/rxrpc/peer_event.c  |  2 +-
-rw-r--r--  net/rxrpc/peer_object.c | 18 ++++++++++++++++++
3 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 80335b4ee4fd..822f45386e31 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1061,6 +1061,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
+void rxrpc_put_peer_locked(struct rxrpc_peer *);
 
 /*
  * proc.c
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 9f2f45c09e58..7666ec72d37e 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 		spin_lock_bh(&rxnet->peer_hash_lock);
 		list_add_tail(&peer->keepalive_link,
 			      &rxnet->peer_keepalive[slot & mask]);
-		rxrpc_put_peer(peer);
+		rxrpc_put_peer_locked(peer);
 	}
 
 	spin_unlock_bh(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 9d3ce81cf8ae..9c3ac96f71cb 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -437,6 +437,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 }
 
 /*
+ * Drop a ref on a peer record where the caller already holds the
+ * peer_hash_lock.
+ */
+void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	n = atomic_dec_return(&peer->usage);
+	trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+	if (n == 0) {
+		hash_del_rcu(&peer->hash_link);
+		list_del_init(&peer->keepalive_link);
+		kfree_rcu(peer, rcu);
+	}
+}
+
+/*
  * Make sure all peer records have been discarded.
  */
 void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)