author	Arnaldo Carvalho de Melo <acme@mandriva.com>	2005-08-24 00:50:06 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 19:04:31 -0400
commit	7ad07e7cf343181002c10c39d3f57a88e4903d4f (patch)
tree	c22067f3f443faebdcd3403fa8ce7c5c89662c60 /net/dccp/output.c
parent	58e45131dc269eff0983c6d44494f9e687686900 (diff)
[DCCP]: Implement the CLOSING timer
So that we retransmit CLOSE/CLOSEREQ packets until they elicit an answer or we hit a timeout. Most of the machinery uses TCP approaches; this code still has to be polished and audited, but it is better than what we had before.

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
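The CLOSING timer itself lives outside this file (the diffstat below is limited to net/dccp/output.c), so what follows is only a sketch of the TCP-style shape the message alludes to: keep the unanswered CLOSE/CLOSEREQ parked on sk_send_head and rearm a backed-off timer until a reply arrives or the retry budget runs out. dccp_retransmit_close() is a hypothetical name, not the handler this commit actually adds.

	/* Sketch only -- illustrates the retransmit pattern described in the
	 * commit message.  dccp_retransmit_close() is hypothetical; the real
	 * handler is in net/dccp/timer.c, which is not part of this diff. */
	static void dccp_retransmit_close(struct sock *sk)
	{
		struct inet_connection_sock *icsk = inet_csk(sk);
		struct sk_buff *skb = sk->sk_send_head; /* queued by dccp_send_close() */

		if (skb == NULL)
			return;

		/* Resend a clone so the original stays queued for the next try. */
		dccp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));

		/* Exponential backoff, capped. */
		icsk->icsk_rto = min_t(u32, icsk->icsk_rto << 1, DCCP_RTO_MAX);
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       jiffies + icsk->icsk_rto);
	}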
Diffstat (limited to 'net/dccp/output.c')
-rw-r--r--	net/dccp/output.c	|	23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 708fc3c0a969..630ca7741022 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -96,8 +96,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
 						      inet->daddr);
 
-		if (dcb->dccpd_type == DCCP_PKT_ACK ||
-		    dcb->dccpd_type == DCCP_PKT_DATAACK)
+		if (set_ack)
 			dccp_event_ack_sent(sk);
 
 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
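The two-way type test collapses into a set_ack flag whose assignment falls earlier in dccp_transmit_skb(), outside this hunk. A plausible reconstruction, assumed rather than taken from the commit:

	/* Assumed shape of the flag tested above; the real assignment is
	 * outside the hunk and may differ (e.g. a switch on dccpd_type). */
	const int set_ack = dcb->dccpd_type == DCCP_PKT_ACK ||
			    dcb->dccpd_type == DCCP_PKT_DATAACK;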
@@ -429,18 +428,15 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
  * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
  * any circumstances.
  */
-void dccp_send_close(struct sock *sk)
+void dccp_send_close(struct sock *sk, const int active)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct sk_buff *skb;
+	const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;
 
-	/* Socket is locked, keep trying until memory is available. */
-	for (;;) {
-		skb = alloc_skb(sk->sk_prot->max_header, GFP_KERNEL);
-		if (skb != NULL)
-			break;
-		yield();
-	}
+	skb = alloc_skb(sk->sk_prot->max_header, prio);
+	if (skb == NULL)
+		return;
 
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(skb, sk->sk_prot->max_header);
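The old yield() loop assumed a process context that no longer always holds: a passive close can run from the softirq receive path, where sleeping is not allowed, hence the context-dependent allocation flag and the early return. Note that the pre-existing comment above the function ("cannot be allowed to fail queueing ... under any circumstances") arguably becomes stale with this change. An annotated restatement of the new policy (comments are editorial, not from the commit):

	/* Editorial annotation of the hunk above, not new code:
	 * - active close: process context (application close()),
	 *   may sleep -> GFP_KERNEL
	 * - passive close: softirq/receive path, must not sleep -> GFP_ATOMIC
	 * A failed allocation is simply dropped; with retransmission in the
	 * picture, busy-waiting for memory is no longer worth it. */
	const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;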
@@ -449,7 +445,12 @@ void dccp_send_close(struct sock *sk)
 					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
 
 	skb_set_owner_w(skb, sk);
-	dccp_transmit_skb(sk, skb);
+	if (active) {
+		BUG_TRAP(sk->sk_send_head == NULL);
+		sk->sk_send_head = skb;
+		dccp_transmit_skb(sk, skb_clone(skb, prio));
+	} else
+		dccp_transmit_skb(sk, skb);
 
 	ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
 	ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
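On an active close the skb is parked on sk_send_head and a clone is transmitted, so the original survives for the CLOSING timer to resend; on a passive close the frame is sent once and forgotten. Hypothetical call sites, just to show the intended split (the real callers are elsewhere in the DCCP tree and not part of this diff):

	/* Hypothetical call sites, not part of this diff: */

	/* Application-initiated close: process context, retransmittable. */
	dccp_send_close(sk, 1);	/* parks skb on sk_send_head, sends a clone */

	/* Responding from the receive path: atomic, best effort. */
	dccp_send_close(sk, 0);	/* GFP_ATOMIC, no retransmit state kept */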