author		Arnaldo Carvalho de Melo <acme@mandriva.com>	2005-08-24 00:50:06 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 19:04:31 -0400
commit		7ad07e7cf343181002c10c39d3f57a88e4903d4f
tree		c22067f3f443faebdcd3403fa8ce7c5c89662c60
parent		58e45131dc269eff0983c6d44494f9e687686900
[DCCP]: Implement the CLOSING timer
So that we retransmit CLOSE/CLOSEREQ packets until they elicit an answer or we hit a timeout. Most of the machinery borrows from TCP; this code still has to be polished and audited, but it is better than what we had before.

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
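
For readers unfamiliar with the pattern this sets up: dccp_close() now leaves the CLOSE/CLOSEREQ skb queued at sk->sk_send_head and arms the ICSK retransmit timer, so the generic retransmit machinery can clone and resend it until the peer answers with a RESET or the timer gives up at DCCP_RTO_MAX. A minimal sketch of what such a timer handler has to do follows; the function name and backoff details are illustrative assumptions, not code from this patch (the actual retransmission is presumably driven by the existing handler in net/dccp/timer.c):

	/*
	 * Illustrative sketch only, not part of this patch. Assumes
	 * sk->sk_send_head still holds the CLOSE/CLOSEREQ skb queued by
	 * dccp_send_close(sk, 1) and that icsk_rto/icsk_retransmits follow
	 * the usual inet_connection_sock conventions.
	 */
	static void dccp_retransmit_close(struct sock *sk)
	{
		struct inet_connection_sock *icsk = inet_csk(sk);
		struct sk_buff *skb = sk->sk_send_head;

		if (skb == NULL || sk->sk_state != DCCP_CLOSING)
			return;	/* peer answered (RESET) or already CLOSED */

		/* Resend a clone; the original stays queued for the next try. */
		dccp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));

		/* Exponential backoff capped at DCCP_RTO_MAX, as TCP does. */
		icsk->icsk_retransmits++;
		icsk->icsk_rto = min_t(u32, icsk->icsk_rto << 1, DCCP_RTO_MAX);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  icsk->icsk_rto, DCCP_RTO_MAX);
	}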
-rw-r--r--	net/dccp/dccp.h		 2
-rw-r--r--	net/dccp/input.c	26
-rw-r--r--	net/dccp/output.c	23
-rw-r--r--	net/dccp/proto.c	26
4 files changed, 46 insertions(+), 31 deletions(-)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 33968a9422f2..53994f10ced5 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -255,7 +255,7 @@ extern int dccp_v4_checksum(const struct sk_buff *skb,
 
 extern int dccp_v4_send_reset(struct sock *sk,
 			      enum dccp_reset_codes code);
-extern void dccp_send_close(struct sock *sk);
+extern void dccp_send_close(struct sock *sk, const int active);
 
 struct dccp_skb_cb {
 	__u8 dccpd_type;
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 85402532e4e9..02af05ec23a2 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -31,14 +31,9 @@ static void dccp_fin(struct sock *sk, struct sk_buff *skb)
 
 static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
 {
-	switch (sk->sk_state) {
-	case DCCP_PARTOPEN:
-	case DCCP_OPEN:
-		dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
-		dccp_fin(sk, skb);
-		dccp_set_state(sk, DCCP_CLOSED);
-		break;
-	}
+	dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
+	dccp_fin(sk, skb);
+	dccp_set_state(sk, DCCP_CLOSED);
 }
 
 static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
@@ -54,13 +49,8 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
 		return;
 	}
 
-	switch (sk->sk_state) {
-	case DCCP_PARTOPEN:
-	case DCCP_OPEN:
-		dccp_set_state(sk, DCCP_CLOSING);
-		dccp_send_close(sk);
-		break;
-	}
+	dccp_set_state(sk, DCCP_CLOSING);
+	dccp_send_close(sk, 0);
 }
 
 static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
@@ -562,6 +552,12 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
 				       DCCP_PKT_SYNC);
 			goto discard;
+		} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
+			dccp_rcv_closereq(sk, skb);
+			goto discard;
+		} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
+			dccp_rcv_close(sk, skb);
+			return 0;
 		}
 
 		switch (sk->sk_state) {
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 708fc3c0a969..630ca7741022 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -96,8 +96,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
 						      inet->daddr);
 
-		if (dcb->dccpd_type == DCCP_PKT_ACK ||
-		    dcb->dccpd_type == DCCP_PKT_DATAACK)
+		if (set_ack)
 			dccp_event_ack_sent(sk);
 
 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
@@ -429,18 +428,15 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
  * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
  * any circumstances.
  */
-void dccp_send_close(struct sock *sk)
+void dccp_send_close(struct sock *sk, const int active)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct sk_buff *skb;
+	const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;
 
-	/* Socket is locked, keep trying until memory is available. */
-	for (;;) {
-		skb = alloc_skb(sk->sk_prot->max_header, GFP_KERNEL);
-		if (skb != NULL)
-			break;
-		yield();
-	}
+	skb = alloc_skb(sk->sk_prot->max_header, prio);
+	if (skb == NULL)
+		return;
 
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(skb, sk->sk_prot->max_header);
@@ -449,7 +445,12 @@ void dccp_send_close(struct sock *sk)
 					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
 
 	skb_set_owner_w(skb, sk);
-	dccp_transmit_skb(sk, skb);
+	if (active) {
+		BUG_TRAP(sk->sk_send_head == NULL);
+		sk->sk_send_head = skb;
+		dccp_transmit_skb(sk, skb_clone(skb, prio));
+	} else
+		dccp_transmit_skb(sk, skb);
 
 	ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
 	ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
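
A note on the new active flag, condensed from the hunks above: dccp_send_close(sk, 1) is the process-context path from dccp_close(), where sleeping in alloc_skb(GFP_KERNEL) is acceptable and the skb must stay at sk->sk_send_head so the CLOSING timer can retransmit a clone; dccp_send_close(sk, 0) is the softirq path from dccp_rcv_closereq(), which must not sleep (GFP_ATOMIC) and sends exactly once, relying on the peer retransmitting its CLOSEREQ if our CLOSE is lost:

	/* Active close, process context (dccp_close() in net/dccp/proto.c):
	 * a clone is transmitted, the original stays queued for the
	 * CLOSING retransmit timer. */
	dccp_send_close(sk, 1);

	/* Passive close, softirq context (dccp_rcv_closereq() in
	 * net/dccp/input.c): fire and forget. */
	dccp_set_state(sk, DCCP_CLOSING);
	dccp_send_close(sk, 0);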
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 8b613c3017c5..a3f8a8095f81 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -402,12 +402,15 @@ void dccp_close(struct sock *sk, long timeout)
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
 	} else if (dccp_close_state(sk)) {
-		dccp_send_close(sk);
+		dccp_send_close(sk, 1);
 	}
 
 	sk_stream_wait_close(sk, timeout);
 
 adjudge_to_death:
+	/*
+	 * It is the last release_sock in its life. It will remove backlog.
+	 */
 	release_sock(sk);
 	/*
 	 * Now socket is owned by kernel and we acquire BH lock
@@ -419,11 +422,26 @@ adjudge_to_death:
 
 	sock_hold(sk);
 	sock_orphan(sk);
 
-	if (sk->sk_state != DCCP_CLOSED)
-		dccp_set_state(sk, DCCP_CLOSED);
+	/*
+	 * The last release_sock may have processed the CLOSE or RESET
+	 * packet moving sock to CLOSED state, if not we have to fire
+	 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
+	 * in draft-ietf-dccp-spec-11. -acme
+	 */
+	if (sk->sk_state == DCCP_CLOSING) {
+		/* FIXME: should start at 2 * RTT */
+		/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+					  inet_csk(sk)->icsk_rto,
+					  DCCP_RTO_MAX);
+#if 0
+		/* Yeah, we should use sk->sk_prot->orphan_count, etc */
+		dccp_set_state(sk, DCCP_CLOSED);
+#endif
+	}
 
-	atomic_inc(&dccp_orphan_count);
+	atomic_inc(sk->sk_prot->orphan_count);
 	if (sk->sk_state == DCCP_CLOSED)
 		inet_csk_destroy_sock(sk);
 