Diffstat (limited to 'net/dccp/output.c')
-rw-r--r--  net/dccp/output.c  84
1 file changed, 33 insertions(+), 51 deletions(-)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 7102e3aed4ca..400c30b6fcae 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -88,16 +88,15 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
                         return -EPROTO;
                 }
 
-                skb->h.raw = skb_push(skb, dccp_header_size);
-                dh = dccp_hdr(skb);
 
                 /* Build DCCP header and checksum it. */
-                memset(dh, 0, dccp_header_size);
+                dh = dccp_zeroed_hdr(skb, dccp_header_size);
                 dh->dccph_type = dcb->dccpd_type;
                 dh->dccph_sport = inet->sport;
                 dh->dccph_dport = inet->dport;
                 dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                 dh->dccph_ccval = dcb->dccpd_ccval;
+                dh->dccph_cscov = dp->dccps_pcslen;
                 /* XXX For now we're using only 48 bits sequence numbers */
                 dh->dccph_x = 1;
 
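Note: the hunk above (and two later hunks in dccp_make_response() and dccp_make_reset()) replaces the open-coded skb_push()/memset() pair with a dccp_zeroed_hdr() helper defined outside this file. The sketch below only illustrates what such a helper has to do, inferred from the code it replaces; it is not quoted from the kernel headers, and the _sketch suffix marks the name as hypothetical.

        /* Push room for the DCCP header onto the skb, zero it, and return a
         * typed pointer to it -- the same three steps the removed lines did. */
        static inline struct dccp_hdr *dccp_zeroed_hdr_sketch(struct sk_buff *skb,
                                                              int headlen)
        {
                skb->h.raw = skb_push(skb, headlen);    /* reserve header space */
                memset(skb->h.raw, 0, headlen);         /* clear all header fields */
                return dccp_hdr(skb);                   /* cast to struct dccp_hdr * */
        }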
@@ -117,7 +116,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
                         break;
                 }
 
-                icsk->icsk_af_ops->send_check(sk, skb->len, skb);
+                icsk->icsk_af_ops->send_check(sk, 0, skb);
 
                 if (set_ack)
                         dccp_event_ack_sent(sk);
@@ -125,17 +124,8 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
                 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 
                 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-                err = icsk->icsk_af_ops->queue_xmit(skb, 0);
-                if (err <= 0)
-                        return err;
-
-                /* NET_XMIT_CN is special. It does not guarantee,
-                 * that this packet is lost. It tells that device
-                 * is about to start to drop packets or already
-                 * drops some packets of the same priority and
-                 * invokes us to send less aggressively.
-                 */
-                return err == NET_XMIT_CN ? 0 : err;
+                err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
+                return net_xmit_eval(err);
         }
         return -ENOBUFS;
 }
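Note: net_xmit_eval() absorbs the NET_XMIT_CN special case that the removed comment described -- congestion notification is not a loss indication, so it is reported as success. A minimal sketch of that semantics (the real macro lives in include/net/sock.h; treat this as an illustration, not a quotation):

        /* NET_XMIT_CN only warns that the device is getting congested; it does
         * not mean this packet was dropped, so map it to 0 (success). */
        #define net_xmit_eval_sketch(e) ((e) == NET_XMIT_CN ? 0 : (e))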
@@ -205,8 +195,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
                 if (signal_pending(current))
                         goto do_interrupted;
 
-                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
-                                            skb->len);
+                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
                 if (rc <= 0)
                         break;
                 delay = msecs_to_jiffies(rc);
@@ -251,25 +240,23 @@ void dccp_write_xmit(struct sock *sk, int block)
 {
         struct dccp_sock *dp = dccp_sk(sk);
         struct sk_buff *skb;
-        long timeo = 30000;     /* If a packet is taking longer than 2 secs
-                                   we have other issues */
+        long timeo = DCCP_XMIT_TIMEO;   /* If a packet is taking longer than
+                                           this we have other issues */
 
         while ((skb = skb_peek(&sk->sk_write_queue))) {
-                int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
-                                                 skb->len);
+                int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 
                 if (err > 0) {
                         if (!block) {
                                 sk_reset_timer(sk, &dp->dccps_xmit_timer,
                                                 msecs_to_jiffies(err)+jiffies);
                                 break;
-                        } else
+                        } else {
                                 err = dccp_wait_for_ccid(sk, skb, &timeo);
-                        if (err) {
-                                printk(KERN_CRIT "%s:err at dccp_wait_for_ccid"
-                                       " %d\n", __FUNCTION__, err);
-                                dump_stack();
+                                timeo = DCCP_XMIT_TIMEO;
                         }
+                        if (err)
+                                DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
                 }
 
                 skb_dequeue(&sk->sk_write_queue);
@@ -291,12 +278,9 @@ void dccp_write_xmit(struct sock *sk, int block)
 
                         err = dccp_transmit_skb(sk, skb);
                         ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
-                        if (err) {
-                                printk(KERN_CRIT "%s:err from "
-                                       "ccid_hc_tx_packet_sent %d\n",
-                                       __FUNCTION__, err);
-                                dump_stack();
-                        }
+                        if (err)
+                                DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
+                                         err);
                 } else
                         kfree(skb);
         }
@@ -329,9 +313,10 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
         skb_reserve(skb, sk->sk_prot->max_header);
 
         skb->dst = dst_clone(dst);
-        skb->csum = 0;
 
         dreq = dccp_rsk(req);
+        if (inet_rsk(req)->acked)       /* increase ISS upon retransmission */
+                dccp_inc_seqno(&dreq->dreq_iss);
         DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
         DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss;
 
@@ -340,10 +325,8 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                 return NULL;
         }
 
-        skb->h.raw = skb_push(skb, dccp_header_size);
-
-        dh = dccp_hdr(skb);
-        memset(dh, 0, dccp_header_size);
+        /* Build and checksum header */
+        dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
         dh->dccph_sport = inet_sk(sk)->sport;
         dh->dccph_dport = inet_rsk(req)->rmt_port;
@@ -355,6 +338,10 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
         dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
         dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
 
+        dccp_csum_outgoing(skb);
+
+        /* We use `acked' to remember that a Response was already sent. */
+        inet_rsk(req)->acked = 1;
         DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
         return skb;
 }
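Note: dccph_cscov (set from dp->dccps_pcslen earlier in this patch) and dccp_csum_outgoing() implement DCCP partial checksum coverage. As a rough, self-contained illustration of the RFC 4340 (section 9.2) rule only -- not the kernel's helper -- CsCov 0 means the checksum covers the whole packet, and CsCov >= 1 covers the DCCP header plus (CsCov - 1) * 4 bytes of application data:

        /* Hypothetical helper: number of bytes the DCCP checksum covers for a
         * given CsCov value, per RFC 4340, 9.2. Not taken from the kernel. */
        static unsigned int dccp_cscov_bytes_sketch(unsigned int cscov,
                                                    unsigned int header_len,
                                                    unsigned int packet_len)
        {
                unsigned int cov;

                if (cscov == 0)                 /* 0: cover the entire packet */
                        return packet_len;
                cov = header_len + (cscov - 1) * 4;
                return cov < packet_len ? cov : packet_len;
        }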
@@ -379,7 +366,6 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
         skb_reserve(skb, sk->sk_prot->max_header);
 
         skb->dst = dst_clone(dst);
-        skb->csum = 0;
 
         dccp_inc_seqno(&dp->dccps_gss);
 
@@ -392,10 +378,7 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                 return NULL;
         }
 
-        skb->h.raw = skb_push(skb, dccp_header_size);
-
-        dh = dccp_hdr(skb);
-        memset(dh, 0, dccp_header_size);
+        dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
         dh->dccph_sport = inet_sk(sk)->sport;
         dh->dccph_dport = inet_sk(sk)->dport;
@@ -407,7 +390,7 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
         dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);
 
         dccp_hdr_reset(skb)->dccph_reset_code = code;
-        inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb);
+        inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb);
 
         DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
         return skb;
@@ -426,9 +409,8 @@ int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
                                                       code);
         if (skb != NULL) {
                 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-                err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0);
-                if (err == NET_XMIT_CN)
-                        err = 0;
+                err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0);
+                return net_xmit_eval(err);
         }
 }
 
@@ -449,7 +431,6 @@ static inline void dccp_connect_init(struct sock *sk)
 
         dccp_sync_mss(sk, dst_mtu(dst));
 
-        dccp_update_gss(sk, dp->dccps_iss);
         /*
          * SWL and AWL are initially adjusted so that they are not less than
          * the initial Sequence Numbers received and sent, respectively:
@@ -458,8 +439,13 @@ static inline void dccp_connect_init(struct sock *sk)
          * These adjustments MUST be applied only at the beginning of the
          * connection.
          */
+        dccp_update_gss(sk, dp->dccps_iss);
         dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
 
+        /* S.GAR - greatest valid acknowledgement number received on a non-Sync;
+         *         initialized to S.ISS (sec. 8.5) */
+        dp->dccps_gar = dp->dccps_iss;
+
         icsk->icsk_retransmits = 0;
         init_timer(&dp->dccps_xmit_timer);
         dp->dccps_xmit_timer.data = (unsigned long)sk;
@@ -481,7 +467,6 @@ int dccp_connect(struct sock *sk)
         skb_reserve(skb, sk->sk_prot->max_header);
 
         DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
-        skb->csum = 0;
 
         dccp_skb_entail(sk, skb);
         dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
@@ -513,7 +498,6 @@ void dccp_send_ack(struct sock *sk)
 
                 /* Reserve space for headers */
                 skb_reserve(skb, sk->sk_prot->max_header);
-                skb->csum = 0;
                 DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                 dccp_transmit_skb(sk, skb);
         }
@@ -567,7 +551,6 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
 
         /* Reserve space for headers and prepare control bits. */
         skb_reserve(skb, sk->sk_prot->max_header);
-        skb->csum = 0;
         DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
         DCCP_SKB_CB(skb)->dccpd_seq = seq;
 
@@ -593,7 +576,6 @@ void dccp_send_close(struct sock *sk, const int active)
 
         /* Reserve space for headers and prepare control bits. */
         skb_reserve(skb, sk->sk_prot->max_header);
-        skb->csum = 0;
         DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
                                         DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
 