author		David Howells <dhowells@redhat.com>	2019-04-30 03:34:08 -0400
committer	David S. Miller <davem@davemloft.net>	2019-04-30 10:50:50 -0400
commit		b13023421b5179413421333f602850914f6a7ad8 (patch)
tree		83cf6ecb7938b0b73b5d76a960a943a17737d805 /net/rxrpc
parent		b145745fc8d15cf9d45a5c7a8f5dbc4862e17cf2 (diff)
rxrpc: Fix net namespace cleanup
In rxrpc_destroy_all_calls(), there are two phases: (1) make sure the
->calls list is empty, emitting error messages if not; and (2) wait for the
RCU cleanup to happen on outstanding calls (i.e. for ->nr_calls to become 0).
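
The phase-two wait pairs with a wake on the freeing side: each call's RCU
free drops ->nr_calls and wakes the waiter when the count reaches zero. A
simplified sketch of that pairing (not the verbatim code; the real
rxrpc_rcu_destroy_call() of this era frees more than shown here):

	/* Waker side: RCU callback that frees a call (simplified). */
	static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
	{
		struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
		struct rxrpc_net *rxnet = call->rxnet;

		kmem_cache_free(rxrpc_call_jar, call);
		if (atomic_dec_and_test(&rxnet->nr_calls))
			wake_up_var(&rxnet->nr_calls);
	}

	/* Waiter side: the tail of rxrpc_destroy_all_calls().  ->nr_calls
	 * evidently carries a +1 bias while the namespace is live, hence
	 * the atomic_dec() before waiting.
	 */
	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));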
To avoid taking the call_lock unnecessarily, the function prechecks ->calls
and returns immediately if the list is empty. This is wrong, however: it
still needs to perform the second phase and wait for ->nr_calls to become 0.
Without this, the rxrpc_net struct may get deallocated before we get to the
RCU cleanup for the last calls. This can lead to:
Slab corruption (Not tainted): kmalloc-16k start=ffff88802b178000, len=16384
050: 6b 6b 6b 6b 6b 6b 6b 6b 61 6b 6b 6b 6b 6b 6b 6b kkkkkkkkakkkkkkk
Note the "61" at offset 0x58. This corresponds to the ->nr_calls member of
struct rxrpc_net (which is >9k in size, and thus allocated out of the 16k
slab).
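
The poison value explains the stray byte: freed slab memory is filled with
POISON_FREE (0x6b), and a counter that keeps being decremented after the
free leaves exactly this signature; 0x6b decaying to 0x61 is consistent
with ten atomic_dec() calls on ->nr_calls landing in already-freed memory.
A hypothetical user-space demo of the signature (assumes little-endian, so
the counter's least significant byte is the single changed byte at 0x58):

	#include <stdio.h>

	int main(void)
	{
		unsigned int counter = 0x6b6b6b6bU;	/* POISON_FREE fill */
		int i;

		/* Stand-in for the late atomic_dec(&rxnet->nr_calls) calls. */
		for (i = 0; i < 10; i++)
			counter--;

		printf("counter = %08x\n", counter);	/* prints 6b6b6b61 */
		return 0;
	}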
Fix this by flipping the condition on the if-statement, putting the locked
section inside the if-body and dropping the return from there. The
function will then always go on to wait for the RCU cleanup on outstanding
calls.
Fixes: 2baec2c3f854 ("rxrpc: Support network namespacing")
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rxrpc')
 net/rxrpc/call_object.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8aa2937b069f..fe96881a334d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -604,30 +604,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 
 	_enter("");
 
-	if (list_empty(&rxnet->calls))
-		return;
+	if (!list_empty(&rxnet->calls)) {
+		write_lock(&rxnet->call_lock);
 
-	write_lock(&rxnet->call_lock);
+		while (!list_empty(&rxnet->calls)) {
+			call = list_entry(rxnet->calls.next,
+					  struct rxrpc_call, link);
+			_debug("Zapping call %p", call);
 
-	while (!list_empty(&rxnet->calls)) {
-		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
-		_debug("Zapping call %p", call);
+			rxrpc_see_call(call);
+			list_del_init(&call->link);
 
-		rxrpc_see_call(call);
-		list_del_init(&call->link);
+			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+			       call, atomic_read(&call->usage),
+			       rxrpc_call_states[call->state],
+			       call->flags, call->events);
 
-		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
-		       call, atomic_read(&call->usage),
-		       rxrpc_call_states[call->state],
-		       call->flags, call->events);
+			write_unlock(&rxnet->call_lock);
+			cond_resched();
+			write_lock(&rxnet->call_lock);
+		}
 
 		write_unlock(&rxnet->call_lock);
-		cond_resched();
-		write_lock(&rxnet->call_lock);
 	}
 
-	write_unlock(&rxnet->call_lock);
-
 	atomic_dec(&rxnet->nr_calls);
 	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
 }