Diffstat (limited to 'net/dccp/input.c')
-rw-r--r--	net/dccp/input.c | 88
1 file changed, 73 insertions(+), 15 deletions(-)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index ef299fbd7c26..fe4b0fbfa508 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -32,16 +32,56 @@ static void dccp_fin(struct sock *sk, struct sk_buff *skb)
 	sk->sk_data_ready(sk, 0);
 }
 
-static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
+static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
 {
-	dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
-	dccp_fin(sk, skb);
-	dccp_set_state(sk, DCCP_CLOSED);
-	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+	int queued = 0;
+
+	switch (sk->sk_state) {
+	/*
+	 * We ignore Close when received in one of the following states:
+	 *  - CLOSED		(may be a late or duplicate packet)
+	 *  - PASSIVE_CLOSEREQ	(the peer has sent a CloseReq earlier)
+	 *  - RESPOND		(already handled by dccp_check_req)
+	 */
+	case DCCP_CLOSING:
+		/*
+		 * Simultaneous-close: receiving a Close after sending one. This
+		 * can happen if both client and server perform active-close and
+		 * will result in an endless ping-pong of crossing and retrans-
+		 * mitted Close packets, which only terminates when one of the
+		 * nodes times out (min. 64 seconds). Quicker convergence can be
+		 * achieved when one of the nodes acts as tie-breaker.
+		 * This is ok as both ends are done with data transfer and each
+		 * end is just waiting for the other to acknowledge termination.
+		 */
+		if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
+			break;
+		/* fall through */
+	case DCCP_REQUESTING:
+	case DCCP_ACTIVE_CLOSEREQ:
+		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
+		dccp_done(sk);
+		break;
+	case DCCP_OPEN:
+	case DCCP_PARTOPEN:
+		/* Give waiting application a chance to read pending data */
+		queued = 1;
+		dccp_fin(sk, skb);
+		dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
+		/* fall through */
+	case DCCP_PASSIVE_CLOSE:
+		/*
+		 * Retransmitted Close: we have already enqueued the first one.
+		 */
+		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+	}
+	return queued;
 }
 
-static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
+static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
 {
+	int queued = 0;
+
 	/*
 	 * Step 7: Check for unexpected packet types
 	 * If (S.is_server and P.type == CloseReq)
@@ -50,12 +90,26 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
-		return;
+		return queued;
 	}
 
-	if (sk->sk_state != DCCP_CLOSING)
+	/* Step 13: process relevant Client states < CLOSEREQ */
+	switch (sk->sk_state) {
+	case DCCP_REQUESTING:
+		dccp_send_close(sk, 0);
 		dccp_set_state(sk, DCCP_CLOSING);
-	dccp_send_close(sk, 0);
+		break;
+	case DCCP_OPEN:
+	case DCCP_PARTOPEN:
+		/* Give waiting application a chance to read pending data */
+		queued = 1;
+		dccp_fin(sk, skb);
+		dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
+		/* fall through */
+	case DCCP_PASSIVE_CLOSEREQ:
+		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+	}
+	return queued;
 }
 
 static u8 dccp_reset_code_convert(const u8 code)
@@ -247,11 +301,13 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
 		dccp_rcv_reset(sk, skb);
 		return 0;
 	case DCCP_PKT_CLOSEREQ:
-		dccp_rcv_closereq(sk, skb);
+		if (dccp_rcv_closereq(sk, skb))
+			return 0;
 		goto discard;
 	case DCCP_PKT_CLOSE:
-		dccp_rcv_close(sk, skb);
-		return 0;
+		if (dccp_rcv_close(sk, skb))
+			return 0;
+		goto discard;
 	case DCCP_PKT_REQUEST:
 		/* Step 7
 		 *   or (S.is_server and P.type == Response)
@@ -590,11 +646,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
 			goto discard;
 		} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
-			dccp_rcv_closereq(sk, skb);
+			if (dccp_rcv_closereq(sk, skb))
+				return 0;
 			goto discard;
 		} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
-			dccp_rcv_close(sk, skb);
-			return 0;
+			if (dccp_rcv_close(sk, skb))
+				return 0;
+			goto discard;
 		}
 
 		switch (sk->sk_state) {
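
The common thread of the hunks above is a consume-or-discard contract: dccp_rcv_close() and dccp_rcv_closereq() now return non-zero when they have queued the skb for the application via dccp_fin(), so the caller returns 0 instead of falling through to discard; a zero return keeps the old discard path. The following is a minimal userspace sketch of that caller convention; the names (rcv_terminating_packet, struct skb, app_may_read_data) are hypothetical stand-ins for illustration only, not kernel code.

/*
 * Illustrative sketch only, not part of the commit above: it mirrors the
 * consume-or-discard convention introduced by the new int return values.
 */
#include <stdbool.h>
#include <stdio.h>

struct skb { const char *data; };

/* Hypothetical stand-in for dccp_rcv_close()/dccp_rcv_closereq():
 * returns non-zero when the packet was queued for the application. */
static int rcv_terminating_packet(struct skb *skb, bool app_may_read_data)
{
	if (app_may_read_data) {
		printf("queueing %s so the application can read pending data\n",
		       skb->data);
		return 1;	/* caller must not discard the skb */
	}
	return 0;		/* caller discards the skb as before */
}

int main(void)
{
	struct skb close_pkt = { "Close" };

	if (rcv_terminating_packet(&close_pkt, true))
		printf("skb consumed: return 0 to the protocol layer\n");
	else
		printf("skb not consumed: fall through to discard\n");
	return 0;
}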