author		Herbert Xu <herbert@gondor.apana.org.au>	2006-05-05 20:09:13 -0400
committer	David S. Miller <davem@davemloft.net>		2006-05-05 20:09:13 -0400
commit		134af34632a7b3b0a98a79a2e56bf9cc927e0eac (patch)
tree		b54012edae78a294723fba01d684cd41b8cd6e97 /net/dccp
parent		1c29fc4989bc2a3838b2837adc12b8aeb0feeede (diff)
[DCCP]: Fix sock_orphan deadlock
Calling sock_orphan inside bh_lock_sock in dccp_close can lead to
deadlocks. For example, the inet_diag code holds sk_callback_lock without
disabling BH. If an inbound packet arrives during that admittedly tiny
window, it will cause a deadlock on bh_lock_sock. Another possible
path would be through sock_wfree if the network device driver frees the
tx skb in process context with BH enabled.
We can fix this by moving sock_orphan out of bh_lock_sock.
The tricky bit is to work out when we need to destroy the socket
ourselves and when it has already been destroyed by someone else.
By moving sock_orphan before the release_sock we can solve this
problem. This is because as long as we own the socket lock its
state cannot change.
So we simply record the socket state before the release_sock
and then check the state again after we regain the socket lock.
If the socket state has transitioned to DCCP_CLOSED in the meantime,
we know that the socket has been destroyed. Otherwise the socket is
still ours to keep.
This problem was discovered by Ingo Molnar using his lock validator.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
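
For readers who want the resulting control flow in one place, the patched
dccp_close ends up roughly in the shape sketched below. This is a simplified
outline assembled from the hunks that follow; the queue purge, lingering and
CLOSE/RESET handling are elided.

	void dccp_close(struct sock *sk, long timeout)
	{
		int state;

		lock_sock(sk);
		/* ... purge receive queue, send CLOSE/RESET, wait for close ... */

	adjudge_to_death:
		/* Orphan the socket while we still own the socket lock:
		 * nothing can change sk_state underneath us, so 'state'
		 * is a reliable snapshot. */
		state = sk->sk_state;
		sock_hold(sk);
		sock_orphan(sk);
		atomic_inc(sk->sk_prot->orphan_count);

		release_sock(sk);	/* last release_sock; drains the backlog */

		local_bh_disable();
		bh_lock_sock(sk);

		/* If a softirq or the backlog moved us to DCCP_CLOSED while
		 * the lock was dropped, it has already destroyed the socket. */
		if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
			goto out;

		if (sk->sk_state == DCCP_CLOSED)
			inet_csk_destroy_sock(sk);	/* still ours to destroy */

		/* Otherwise, socket is reprieved until protocol close. */
	out:
		bh_unlock_sock(sk);
		local_bh_enable();
		sock_put(sk);
	}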
Diffstat (limited to 'net/dccp')
-rw-r--r--	net/dccp/proto.c	13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 1ff7328b0e17..2e0ee8355c41 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,6 +848,7 @@ static int dccp_close_state(struct sock *sk)
 void dccp_close(struct sock *sk, long timeout)
 {
 	struct sk_buff *skb;
+	int state;
 
 	lock_sock(sk);
 
@@ -882,6 +883,11 @@ void dccp_close(struct sock *sk, long timeout)
 		sk_stream_wait_close(sk, timeout);
 
 adjudge_to_death:
+	state = sk->sk_state;
+	sock_hold(sk);
+	sock_orphan(sk);
+	atomic_inc(sk->sk_prot->orphan_count);
+
 	/*
 	 * It is the last release_sock in its life. It will remove backlog.
 	 */
@@ -894,8 +900,9 @@ adjudge_to_death:
 	bh_lock_sock(sk);
 	BUG_TRAP(!sock_owned_by_user(sk));
 
-	sock_hold(sk);
-	sock_orphan(sk);
+	/* Have we already been destroyed by a softirq or backlog? */
+	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
+		goto out;
 
 	/*
 	 * The last release_sock may have processed the CLOSE or RESET
@@ -915,12 +922,12 @@ adjudge_to_death:
 #endif
 	}
 
-	atomic_inc(sk->sk_prot->orphan_count);
 	if (sk->sk_state == DCCP_CLOSED)
 		inet_csk_destroy_sock(sk);
 
 	/* Otherwise, socket is reprieved until protocol close. */
 
+out:
 	bh_unlock_sock(sk);
 	local_bh_enable();
 	sock_put(sk);