author		Gerrit Renker <gerrit@erg.abdn.ac.uk>	2010-10-27 15:16:26 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-28 13:27:00 -0400
commit		dc841e30eaea9f9f83c9ab1ee0b3ef9e5c95ce8a (patch)
tree		921458d0ea02f1478dbac9305c1925dbea8c0dd9 /net
parent		fe84f4140f0d24deca8591e38926b95cfd097e62 (diff)
dccp: Extend CCID packet dequeueing interface
This extends the packet dequeueing interface of dccp_write_xmit() to allow
 1. CCIDs to take care of timing when the next packet may be sent;
 2. delayed sending (as before, with an inter-packet gap of up to 65.535 seconds).
The main purpose is to take CCID-2 out of its polling mode: when it is
network-limited, it retries sending every millisecond, without interruption.
The mode of operation for (2) is as follows (a hypothetical CCID-side sketch
is given below):
 * a new packet is enqueued via dccp_sendmsg() => dccp_write_xmit(),
 * ccid_hc_tx_send_packet() detects that it may not send (e.g. window full),
 * it signals this condition via `CCID_PACKET_WILL_DEQUEUE_LATER',
 * dccp_write_xmit() returns without further action;
 * after some time the wait-condition for the CCID becomes true,
 * that CCID schedules the tasklet,
 * the tasklet function calls ccid_hc_tx_send_packet() via dccp_write_xmit(),
 * since the wait-condition is now true, ccid_hc_tx_send_packet() returns
   "send now",
 * the packet is sent, and possibly more follow (since dccp_write_xmit() loops).
Code reuse: the tasklet function calls dccp_write_xmit(); the timer function
reduces to a wrapper around the same code.
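To illustrate the interface (this sketch is not part of the patch): a
window-limited CCID could signal and later lift the dequeueing block roughly
as below. The ccid_foo_* names and the tx_pipe/tx_cwnd fields are hypothetical
stand-ins; only the CCID_PACKET_* return codes and the dccps_xmitlet tasklet
come from this patch set.

	/* Hypothetical send-packet hook of a window-based CCID */
	static int ccid_foo_hc_tx_send_packet(struct sock *sk,
					      struct sk_buff *skb)
	{
		struct ccid_foo_hc_tx *hc = ccid_foo_hc_tx_sk(sk);

		if (hc->tx_pipe >= hc->tx_cwnd)
			/* Window full: dccp_write_xmit() returns at once;
			 * this CCID will trigger dequeueing itself later. */
			return CCID_PACKET_WILL_DEQUEUE_LATER;

		return CCID_PACKET_SEND_AT_ONCE;
	}

	/* Ack processing: once the window opens again, re-enter
	 * dccp_write_xmit() via the tasklet set up in timer.c. */
	static void ccid_foo_window_opened(struct sock *sk)
	{
		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
	}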
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/dccp/output.c	118
-rw-r--r--	net/dccp/timer.c	 25
2 files changed, 86 insertions(+), 57 deletions(-)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index a988fe9ffcba..11418a9a389d 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -254,63 +254,89 @@ do_interrupted:
 	goto out;
 }
 
+/**
+ * dccp_xmit_packet  -  Send data packet under control of CCID
+ * Transmits next-queued payload and informs CCID to account for the packet.
+ */
+static void dccp_xmit_packet(struct sock *sk)
+{
+	int err, len;
+	struct dccp_sock *dp = dccp_sk(sk);
+	struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);
+
+	if (unlikely(skb == NULL))
+		return;
+	len = skb->len;
+
+	if (sk->sk_state == DCCP_PARTOPEN) {
+		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
+		/*
+		 * See 8.1.5 - Handshake Completion.
+		 *
+		 * For robustness we resend Confirm options until the client has
+		 * entered OPEN. During the initial feature negotiation, the MPS
+		 * is smaller than usual, reduced by the Change/Confirm options.
+		 */
+		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
+			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
+			dccp_send_ack(sk);
+			dccp_feat_list_purge(&dp->dccps_featneg);
+		}
+
+		inet_csk_schedule_ack(sk);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+					  inet_csk(sk)->icsk_rto,
+					  DCCP_RTO_MAX);
+		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+	} else if (dccp_ack_pending(sk)) {
+		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+	} else {
+		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
+	}
+
+	err = dccp_transmit_skb(sk, skb);
+	if (err)
+		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
+	/*
+	 * Register this one as sent even if an error occurred. To the remote
+	 * end a local packet drop is indistinguishable from network loss, i.e.
+	 * any local drop will eventually be reported via receiver feedback.
+	 */
+	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
+}
+
 void dccp_write_xmit(struct sock *sk, int block)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct sk_buff *skb;
 
 	while ((skb = skb_peek(&sk->sk_write_queue))) {
-		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
+		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 
-		if (err > 0) {
-			if (!block) {
-				sk_reset_timer(sk, &dp->dccps_xmit_timer,
-						msecs_to_jiffies(err)+jiffies);
-				break;
-			} else
-				err = dccp_wait_for_ccid(sk, skb, err);
-			if (err && err != -EINTR)
-				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
-		}
-
-		skb_dequeue(&sk->sk_write_queue);
-		if (err == 0) {
-			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
-			const int len = skb->len;
-
-			if (sk->sk_state == DCCP_PARTOPEN) {
-				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
-				/*
-				 * See 8.1.5 - Handshake Completion.
-				 *
-				 * For robustness we resend Confirm options until the client has
-				 * entered OPEN. During the initial feature negotiation, the MPS
-				 * is smaller than usual, reduced by the Change/Confirm options.
-				 */
-				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
-					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
-					dccp_send_ack(sk);
-					dccp_feat_list_purge(&dp->dccps_featneg);
-				}
-
-				inet_csk_schedule_ack(sk);
-				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-							  inet_csk(sk)->icsk_rto,
-							  DCCP_RTO_MAX);
-				dcb->dccpd_type = DCCP_PKT_DATAACK;
-			} else if (dccp_ack_pending(sk))
-				dcb->dccpd_type = DCCP_PKT_DATAACK;
-			else
-				dcb->dccpd_type = DCCP_PKT_DATA;
-
-			err = dccp_transmit_skb(sk, skb);
-			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
-			if (err)
-				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
-					 err);
-		} else {
-			dccp_pr_debug("packet discarded due to err=%d\n", err);
-			kfree_skb(skb);
+		switch (ccid_packet_dequeue_eval(rc)) {
+		case CCID_PACKET_WILL_DEQUEUE_LATER:
+			return;
+		case CCID_PACKET_DELAY:
+			if (!block) {
+				sk_reset_timer(sk, &dp->dccps_xmit_timer,
+					       msecs_to_jiffies(rc)+jiffies);
+				return;
+			}
+			rc = dccp_wait_for_ccid(sk, skb, rc);
+			if (rc && rc != -EINTR) {
+				DCCP_BUG("err=%d after dccp_wait_for_ccid", rc);
+				skb_dequeue(&sk->sk_write_queue);
+				kfree_skb(skb);
+				break;
+			}
+			/* fall through */
+		case CCID_PACKET_SEND_AT_ONCE:
+			dccp_xmit_packet(sk);
+			break;
+		case CCID_PACKET_ERR:
+			skb_dequeue(&sk->sk_write_queue);
+			kfree_skb(skb);
+			dccp_pr_debug("packet discarded due to err=%d\n", rc);
 		}
 	}
 }
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 1a9aa05d4dc4..916f9d1dab36 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -237,32 +237,35 @@ out:
 	sock_put(sk);
 }
 
-/* Transmit-delay timer: used by the CCIDs to delay actual send time */
-static void dccp_write_xmit_timer(unsigned long data)
+/**
+ * dccp_write_xmitlet  -  Workhorse for CCID packet dequeueing interface
+ * See the comments above %ccid_dequeueing_decision for supported modes.
+ */
+static void dccp_write_xmitlet(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
-	struct dccp_sock *dp = dccp_sk(sk);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
-		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
+		sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
 	else
 		dccp_write_xmit(sk, 0);
 	bh_unlock_sock(sk);
-	sock_put(sk);
 }
 
-static void dccp_init_write_xmit_timer(struct sock *sk)
+static void dccp_write_xmit_timer(unsigned long data)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
-
-	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
-			(unsigned long)sk);
+	dccp_write_xmitlet(data);
+	sock_put((struct sock *)data);
 }
 
 void dccp_init_xmit_timers(struct sock *sk)
 {
-	dccp_init_write_xmit_timer(sk);
+	struct dccp_sock *dp = dccp_sk(sk);
+
+	tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
+	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
+		    (unsigned long)sk);
 	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
 				  &dccp_keepalive_timer);
 }