diff options
author | Gerrit Renker <gerrit@erg.abdn.ac.uk> | 2010-10-27 15:16:27 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-10-28 13:27:01 -0400 |
commit | b1fcf55eea541af9efa5d39f5a0d1aec8ceca55d (patch) | |
tree | a021b6abde9c784d67ee0de3bb7fb31f7d5b2e9f /net/dccp/output.c | |
parent | dc841e30eaea9f9f83c9ab1ee0b3ef9e5c95ce8a (diff) |
dccp: Refine the wait-for-ccid mechanism
This extends the existing wait-for-ccid routine so that it may be used with
different types of CCID, addressing the following problems:
1) The queue-drain mechanism only works with rate-based CCIDs. If CCID-2 for
example has a full TX queue and becomes network-limited just as the
application wants to close, then waiting for CCID-2 to become unblocked
could lead to an indefinite delay (i.e., application "hangs").
2) Since each TX CCID in turn uses a feedback mechanism, there may be changes
in its sending policy while the queue is being drained. This can lead to
further delays during which the application will not be able to terminate.
3) The minimum wait time for CCID-3/4 can be expected to be the queue length
times the current inter-packet delay. For example if tx_qlen=100 and a delay
of 15 ms is used for each packet, then the application would have to wait
for a minimum of 1.5 seconds before being allowed to exit.
4) There is no way for the user/application to control this behaviour. It would
be good to use the timeout argument of dccp_close() as an upper bound. Then
the maximum time that an application is willing to wait for its CCIDs can
be set via the SO_LINGER option.
These problems are addressed by giving the CCID a grace period of up to the
`timeout' value.
The wait-for-ccid function is, as before, used when the application
(a) has read all the data in its receive buffer and
(b) if SO_LINGER was set with a non-zero linger time, or
(c) the socket is either in the OPEN (active close) or in the PASSIVE_CLOSEREQ
state (client application closes after receiving CloseReq).
In addition, there is a catch-all case of __skb_queue_purge() after waiting for
the CCID. This is necessary since the write queue may still have data when
(a) the host has been passively-closed,
(b) abnormal termination (unread data, zero linger time),
(c) wait-for-ccid could not finish within the given time limit.
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/dccp/output.c')
-rw-r--r-- | net/dccp/output.c | 115 |
1 files changed, 65 insertions, 50 deletions
diff --git a/net/dccp/output.c b/net/dccp/output.c index 11418a9a389d..45b91853f5ae 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -209,49 +209,29 @@ void dccp_write_space(struct sock *sk) | |||
209 | } | 209 | } |
210 | 210 | ||
211 | /** | 211 | /** |
212 | * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet | 212 | * dccp_wait_for_ccid - Await CCID send permission |
213 | * @sk: socket to wait for | 213 | * @sk: socket to wait for |
214 | * @skb: current skb to pass on for waiting | 214 | * @delay: timeout in jiffies |
215 | * @delay: sleep timeout in milliseconds (> 0) | 215 | * This is used by CCIDs which need to delay the send time in process context. |
216 | * This function is called by default when the socket is closed, and | ||
217 | * when a non-zero linger time is set on the socket. For consistency | ||
218 | */ | 216 | */ |
219 | static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay) | 217 | static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay) |
220 | { | 218 | { |
221 | struct dccp_sock *dp = dccp_sk(sk); | ||
222 | DEFINE_WAIT(wait); | 219 | DEFINE_WAIT(wait); |
223 | unsigned long jiffdelay; | 220 | long remaining; |
224 | int rc; | ||
225 | |||
226 | do { | ||
227 | dccp_pr_debug("delayed send by %d msec\n", delay); | ||
228 | jiffdelay = msecs_to_jiffies(delay); | ||
229 | |||
230 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
231 | 221 | ||
232 | sk->sk_write_pending++; | 222 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
233 | release_sock(sk); | 223 | sk->sk_write_pending++; |
234 | schedule_timeout(jiffdelay); | 224 | release_sock(sk); |
235 | lock_sock(sk); | ||
236 | sk->sk_write_pending--; | ||
237 | 225 | ||
238 | if (sk->sk_err) | 226 | remaining = schedule_timeout(delay); |
239 | goto do_error; | ||
240 | if (signal_pending(current)) | ||
241 | goto do_interrupted; | ||
242 | 227 | ||
243 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); | 228 | lock_sock(sk); |
244 | } while ((delay = rc) > 0); | 229 | sk->sk_write_pending--; |
245 | out: | ||
246 | finish_wait(sk_sleep(sk), &wait); | 230 | finish_wait(sk_sleep(sk), &wait); |
247 | return rc; | 231 | |
248 | 232 | if (signal_pending(current) || sk->sk_err) | |
249 | do_error: | 233 | return -1; |
250 | rc = -EPIPE; | 234 | return remaining; |
251 | goto out; | ||
252 | do_interrupted: | ||
253 | rc = -EINTR; | ||
254 | goto out; | ||
255 | } | 235 | } |
256 | 236 | ||
257 | /** | 237 | /** |
@@ -305,7 +285,53 @@ static void dccp_xmit_packet(struct sock *sk) | |||
305 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); | 285 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); |
306 | } | 286 | } |
307 | 287 | ||
308 | void dccp_write_xmit(struct sock *sk, int block) | 288 | /** |
289 | * dccp_flush_write_queue - Drain queue at end of connection | ||
290 | * Since dccp_sendmsg queues packets without waiting for them to be sent, it may | ||
291 | * happen that the TX queue is not empty at the end of a connection. We give the | ||
292 | * HC-sender CCID a grace period of up to @time_budget jiffies. If this function | ||
293 | * returns with a non-empty write queue, it will be purged later. | ||
294 | */ | ||
295 | void dccp_flush_write_queue(struct sock *sk, long *time_budget) | ||
296 | { | ||
297 | struct dccp_sock *dp = dccp_sk(sk); | ||
298 | struct sk_buff *skb; | ||
299 | long delay, rc; | ||
300 | |||
301 | while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) { | ||
302 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); | ||
303 | |||
304 | switch (ccid_packet_dequeue_eval(rc)) { | ||
305 | case CCID_PACKET_WILL_DEQUEUE_LATER: | ||
306 | /* | ||
307 | * If the CCID determines when to send, the next sending | ||
308 | * time is unknown or the CCID may not even send again | ||
309 | * (e.g. remote host crashes or lost Ack packets). | ||
310 | */ | ||
311 | DCCP_WARN("CCID did not manage to send all packets\n"); | ||
312 | return; | ||
313 | case CCID_PACKET_DELAY: | ||
314 | delay = msecs_to_jiffies(rc); | ||
315 | if (delay > *time_budget) | ||
316 | return; | ||
317 | rc = dccp_wait_for_ccid(sk, delay); | ||
318 | if (rc < 0) | ||
319 | return; | ||
320 | *time_budget -= (delay - rc); | ||
321 | /* check again if we can send now */ | ||
322 | break; | ||
323 | case CCID_PACKET_SEND_AT_ONCE: | ||
324 | dccp_xmit_packet(sk); | ||
325 | break; | ||
326 | case CCID_PACKET_ERR: | ||
327 | skb_dequeue(&sk->sk_write_queue); | ||
328 | kfree_skb(skb); | ||
329 | dccp_pr_debug("packet discarded due to err=%ld\n", rc); | ||
330 | } | ||
331 | } | ||
332 | } | ||
333 | |||
334 | void dccp_write_xmit(struct sock *sk) | ||
309 | { | 335 | { |
310 | struct dccp_sock *dp = dccp_sk(sk); | 336 | struct dccp_sock *dp = dccp_sk(sk); |
311 | struct sk_buff *skb; | 337 | struct sk_buff *skb; |
@@ -317,19 +343,9 @@ void dccp_write_xmit(struct sock *sk, int block) | |||
317 | case CCID_PACKET_WILL_DEQUEUE_LATER: | 343 | case CCID_PACKET_WILL_DEQUEUE_LATER: |
318 | return; | 344 | return; |
319 | case CCID_PACKET_DELAY: | 345 | case CCID_PACKET_DELAY: |
320 | if (!block) { | 346 | sk_reset_timer(sk, &dp->dccps_xmit_timer, |
321 | sk_reset_timer(sk, &dp->dccps_xmit_timer, | 347 | jiffies + msecs_to_jiffies(rc)); |
322 | msecs_to_jiffies(rc)+jiffies); | 348 | return; |
323 | return; | ||
324 | } | ||
325 | rc = dccp_wait_for_ccid(sk, skb, rc); | ||
326 | if (rc && rc != -EINTR) { | ||
327 | DCCP_BUG("err=%d after dccp_wait_for_ccid", rc); | ||
328 | skb_dequeue(&sk->sk_write_queue); | ||
329 | kfree_skb(skb); | ||
330 | break; | ||
331 | } | ||
332 | /* fall through */ | ||
333 | case CCID_PACKET_SEND_AT_ONCE: | 349 | case CCID_PACKET_SEND_AT_ONCE: |
334 | dccp_xmit_packet(sk); | 350 | dccp_xmit_packet(sk); |
335 | break; | 351 | break; |
@@ -648,7 +664,6 @@ void dccp_send_close(struct sock *sk, const int active) | |||
648 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE; | 664 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE; |
649 | 665 | ||
650 | if (active) { | 666 | if (active) { |
651 | dccp_write_xmit(sk, 1); | ||
652 | dccp_skb_entail(sk, skb); | 667 | dccp_skb_entail(sk, skb); |
653 | dccp_transmit_skb(sk, skb_clone(skb, prio)); | 668 | dccp_transmit_skb(sk, skb_clone(skb, prio)); |
654 | /* | 669 | /* |