diff options
author | Ian McDonald <ian.mcdonald@jandi.co.nz> | 2006-08-26 22:16:45 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-09-22 18:18:17 -0400 |
commit | 97e5848dd39e7e76bd6077735ebb5473763ab9c5 (patch) | |
tree | f1292b7bb558df8f27016fbac31a3b017bdba438 /net/dccp/output.c | |
parent | 2a0109a707d2b0ae48f124d3be0fdf1715c0107a (diff) |
[DCCP]: Introduce tx buffering
This adds transmit buffering to DCCP.
I have tested with CCID2/3 and with loss and rate limiting.
Signed-off-by: Ian McDonald <ian.mcdonald@jandi.co.nz>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/dccp/output.c')
-rw-r--r-- | net/dccp/output.c | 90 |
1 file changed, 65 insertions, 25 deletions
diff --git a/net/dccp/output.c b/net/dccp/output.c index 58669beee132..7102e3aed4ca 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -198,7 +198,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, | |||
198 | while (1) { | 198 | while (1) { |
199 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 199 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); |
200 | 200 | ||
201 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | 201 | if (sk->sk_err) |
202 | goto do_error; | 202 | goto do_error; |
203 | if (!*timeo) | 203 | if (!*timeo) |
204 | goto do_nonblock; | 204 | goto do_nonblock; |
@@ -234,37 +234,72 @@ do_interrupted: | |||
234 | goto out; | 234 | goto out; |
235 | } | 235 | } |
236 | 236 | ||
237 | int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo) | 237 | static void dccp_write_xmit_timer(unsigned long data) { |
238 | struct sock *sk = (struct sock *)data; | ||
239 | struct dccp_sock *dp = dccp_sk(sk); | ||
240 | |||
241 | bh_lock_sock(sk); | ||
242 | if (sock_owned_by_user(sk)) | ||
243 | sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1); | ||
244 | else | ||
245 | dccp_write_xmit(sk, 0); | ||
246 | bh_unlock_sock(sk); | ||
247 | sock_put(sk); | ||
248 | } | ||
249 | |||
250 | void dccp_write_xmit(struct sock *sk, int block) | ||
238 | { | 251 | { |
239 | const struct dccp_sock *dp = dccp_sk(sk); | 252 | struct dccp_sock *dp = dccp_sk(sk); |
240 | int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, | 253 | struct sk_buff *skb; |
254 | long timeo = 30000; /* If a packet is taking longer than 2 secs | ||
255 | we have other issues */ | ||
256 | |||
257 | while ((skb = skb_peek(&sk->sk_write_queue))) { | ||
258 | int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, | ||
241 | skb->len); | 259 | skb->len); |
242 | 260 | ||
243 | if (err > 0) | 261 | if (err > 0) { |
244 | err = dccp_wait_for_ccid(sk, skb, timeo); | 262 | if (!block) { |
263 | sk_reset_timer(sk, &dp->dccps_xmit_timer, | ||
264 | msecs_to_jiffies(err)+jiffies); | ||
265 | break; | ||
266 | } else | ||
267 | err = dccp_wait_for_ccid(sk, skb, &timeo); | ||
268 | if (err) { | ||
269 | printk(KERN_CRIT "%s:err at dccp_wait_for_ccid" | ||
270 | " %d\n", __FUNCTION__, err); | ||
271 | dump_stack(); | ||
272 | } | ||
273 | } | ||
245 | 274 | ||
246 | if (err == 0) { | 275 | skb_dequeue(&sk->sk_write_queue); |
247 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); | 276 | if (err == 0) { |
248 | const int len = skb->len; | 277 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); |
278 | const int len = skb->len; | ||
249 | 279 | ||
250 | if (sk->sk_state == DCCP_PARTOPEN) { | 280 | if (sk->sk_state == DCCP_PARTOPEN) { |
251 | /* See 8.1.5. Handshake Completion */ | 281 | /* See 8.1.5. Handshake Completion */ |
252 | inet_csk_schedule_ack(sk); | 282 | inet_csk_schedule_ack(sk); |
253 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | 283 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, |
254 | inet_csk(sk)->icsk_rto, | 284 | inet_csk(sk)->icsk_rto, |
255 | DCCP_RTO_MAX); | 285 | DCCP_RTO_MAX); |
256 | dcb->dccpd_type = DCCP_PKT_DATAACK; | 286 | dcb->dccpd_type = DCCP_PKT_DATAACK; |
257 | } else if (dccp_ack_pending(sk)) | 287 | } else if (dccp_ack_pending(sk)) |
258 | dcb->dccpd_type = DCCP_PKT_DATAACK; | 288 | dcb->dccpd_type = DCCP_PKT_DATAACK; |
259 | else | 289 | else |
260 | dcb->dccpd_type = DCCP_PKT_DATA; | 290 | dcb->dccpd_type = DCCP_PKT_DATA; |
261 | 291 | ||
262 | err = dccp_transmit_skb(sk, skb); | 292 | err = dccp_transmit_skb(sk, skb); |
263 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len); | 293 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len); |
264 | } else | 294 | if (err) { |
265 | kfree_skb(skb); | 295 | printk(KERN_CRIT "%s:err from " |
266 | 296 | "ccid_hc_tx_packet_sent %d\n", | |
267 | return err; | 297 | __FUNCTION__, err); |
298 | dump_stack(); | ||
299 | } | ||
300 | } else | ||
301 | kfree(skb); | ||
302 | } | ||
268 | } | 303 | } |
269 | 304 | ||
270 | int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | 305 | int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb) |
@@ -426,6 +461,9 @@ static inline void dccp_connect_init(struct sock *sk) | |||
426 | dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss)); | 461 | dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss)); |
427 | 462 | ||
428 | icsk->icsk_retransmits = 0; | 463 | icsk->icsk_retransmits = 0; |
464 | init_timer(&dp->dccps_xmit_timer); | ||
465 | dp->dccps_xmit_timer.data = (unsigned long)sk; | ||
466 | dp->dccps_xmit_timer.function = dccp_write_xmit_timer; | ||
429 | } | 467 | } |
430 | 468 | ||
431 | int dccp_connect(struct sock *sk) | 469 | int dccp_connect(struct sock *sk) |
@@ -560,8 +598,10 @@ void dccp_send_close(struct sock *sk, const int active) | |||
560 | DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ; | 598 | DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ; |
561 | 599 | ||
562 | if (active) { | 600 | if (active) { |
601 | dccp_write_xmit(sk, 1); | ||
563 | dccp_skb_entail(sk, skb); | 602 | dccp_skb_entail(sk, skb); |
564 | dccp_transmit_skb(sk, skb_clone(skb, prio)); | 603 | dccp_transmit_skb(sk, skb_clone(skb, prio)); |
604 | /* FIXME do we need a retransmit timer here? */ | ||
565 | } else | 605 | } else |
566 | dccp_transmit_skb(sk, skb); | 606 | dccp_transmit_skb(sk, skb); |
567 | } | 607 | } |