Diffstat (limited to 'net/dccp/output.c')
-rw-r--r--	net/dccp/output.c	88
1 file changed, 61 insertions(+), 27 deletions(-)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index efd7ffb903a1..7409e4a3abdf 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -27,7 +27,7 @@ static inline void dccp_event_ack_sent(struct sock *sk)
 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
 
-static inline void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
+static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
 {
 	skb_set_owner_w(skb, sk);
 	WARN_ON(sk->sk_send_head);
@@ -49,7 +49,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 	struct dccp_hdr *dh;
 	/* XXX For now we're using only 48 bits sequence numbers */
-	const int dccp_header_size = sizeof(*dh) +
+	const u32 dccp_header_size = sizeof(*dh) +
 				     sizeof(struct dccp_hdr_ext) +
 				     dccp_packet_hdr_len(dcb->dccpd_type);
 	int err, set_ack = 1;
@@ -64,6 +64,10 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 	case DCCP_PKT_DATAACK:
 		break;
 
+	case DCCP_PKT_REQUEST:
+		set_ack = 0;
+		/* fall through */
+
 	case DCCP_PKT_SYNC:
 	case DCCP_PKT_SYNCACK:
 		ackno = dcb->dccpd_seq;
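A DCCP-Request opens the connection and has nothing to acknowledge yet, which is why the new case clears set_ack before sharing the Sync/SyncAck sequence-number handling. A minimal sketch of the switch this produces, assuming the unmodified DATA/DATAACK context around this hunk (the real code continues past these arms into further bookkeeping rather than breaking):

	switch (dcb->dccpd_type) {
	case DCCP_PKT_DATA:
		set_ack = 0;		/* pure data carries no ack field */
		/* fall through */
	case DCCP_PKT_DATAACK:
		break;

	case DCCP_PKT_REQUEST:
		set_ack = 0;		/* nothing received yet to ack */
		/* fall through */

	case DCCP_PKT_SYNC:
	case DCCP_PKT_SYNCACK:
		ackno = dcb->dccpd_seq;	/* ack the triggering packet */
		break;
	}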
@@ -79,7 +83,11 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 	}
 
 	dcb->dccpd_seq = dp->dccps_gss;
-	dccp_insert_options(sk, skb);
+
+	if (dccp_insert_options(sk, skb)) {
+		kfree_skb(skb);
+		return -EPROTO;
+	}
 
 	skb->h.raw = skb_push(skb, dccp_header_size);
 	dh = dccp_hdr(skb);
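dccp_insert_options() can now report failure, e.g. when the pending options no longer fit the option space, so dccp_transmit_skb() must drop the packet instead of emitting a truncated header. A sketch of the contract this hunk relies on; example_transmit() is a hypothetical caller, not code from this file:

	/* Assumed contract: dccp_insert_options() returns 0 on success and
	 * nonzero on failure, leaving skb ownership with the caller.
	 */
	static int example_transmit(struct sock *sk, struct sk_buff *skb)
	{
		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);	/* never send a malformed packet */
			return -EPROTO;	/* surfaced to the write path */
		}
		/* ... push the DCCP header and hand the skb to IP ... */
		return 0;
	}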
@@ -275,17 +283,16 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
 {
 	struct dccp_hdr *dh;
 	struct dccp_request_sock *dreq;
-	const int dccp_header_size = sizeof(struct dccp_hdr) +
+	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
 				     sizeof(struct dccp_hdr_ext) +
 				     sizeof(struct dccp_hdr_response);
-	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
-					   dccp_header_size, 1,
+	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
 					   GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);
+	skb_reserve(skb, sk->sk_prot->max_header);
 
 	skb->dst = dst_clone(dst);
 	skb->csum = 0;
@@ -293,7 +300,11 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
 	dreq = dccp_rsk(req);
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
 	DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss;
-	dccp_insert_options(sk, skb);
+
+	if (dccp_insert_options(sk, skb)) {
+		kfree_skb(skb);
+		return NULL;
+	}
 
 	skb->h.raw = skb_push(skb, dccp_header_size);
 
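Allocation and reserve now share a single per-protocol bound, sk->sk_prot->max_header, instead of recomputing MAX_HEADER + DCCP_MAX_OPT_LEN + header size at every call site. This assumes a companion change that fills in max_header on DCCP's struct proto, roughly:

	/* Sketch of the assumed proto initialization; MAX_DCCP_HEADER is the
	 * tree's existing worst-case bound covering link, network and DCCP
	 * headers plus option space.
	 */
	struct proto dccp_prot = {
		.name		= "DCCP",
		/* ... */
		.max_header	= MAX_DCCP_HEADER,
	};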
@@ -310,32 +321,28 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
 	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
 	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
 
-	dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
-					      inet_rsk(req)->rmt_addr);
-
 	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 	return skb;
 }
 
 EXPORT_SYMBOL_GPL(dccp_make_response);
 
-struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
+static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
 				const enum dccp_reset_codes code)
 
 {
 	struct dccp_hdr *dh;
 	struct dccp_sock *dp = dccp_sk(sk);
-	const int dccp_header_size = sizeof(struct dccp_hdr) +
+	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
 				     sizeof(struct dccp_hdr_ext) +
 				     sizeof(struct dccp_hdr_reset);
-	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
-					   dccp_header_size, 1,
+	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
 					   GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);
+	skb_reserve(skb, sk->sk_prot->max_header);
 
 	skb->dst = dst_clone(dst);
 	skb->csum = 0;
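dccp_make_reset() also switches to the shared bound and, with dccp_send_reset() added below, becomes static: reset construction now has a single in-file entry point. Its code argument follows the reset codes of RFC 4340, section 5.6; a subset, assuming the tree's enum dccp_reset_codes mirrors the RFC numbering:

	enum dccp_reset_codes {
		DCCP_RESET_CODE_UNSPECIFIED = 0,
		DCCP_RESET_CODE_CLOSED,
		DCCP_RESET_CODE_ABORTED,
		DCCP_RESET_CODE_NO_CONNECTION,
		/* ... */
	};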
@@ -345,7 +352,11 @@ struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
 	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
 	DCCP_SKB_CB(skb)->dccpd_seq = dp->dccps_gss;
-	dccp_insert_options(sk, skb);
+
+	if (dccp_insert_options(sk, skb)) {
+		kfree_skb(skb);
+		return NULL;
+	}
 
 	skb->h.raw = skb_push(skb, dccp_header_size);
 
@@ -362,14 +373,34 @@ struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
 	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);
 
 	dccp_hdr_reset(skb)->dccph_reset_code = code;
-
-	dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
-					      inet_sk(sk)->daddr);
+	inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb);
 
 	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 	return skb;
 }
 
+int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
+{
+	/*
+	 * FIXME: what if rebuild_header fails?
+	 * Should we be doing a rebuild_header here?
+	 */
+	int err = inet_sk_rebuild_header(sk);
+
+	if (err == 0) {
+		struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
+						      code);
+		if (skb != NULL) {
+			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0);
+			if (err == NET_XMIT_CN)
+				err = 0;
+		}
+	}
+
+	return err;
+}
+
 /*
  * Do all connect socket setups that can be done AF independent.
  */
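With this hunk the last IPv4-only calls leave the generic output path: checksumming and transmission go through the per-family ops hung off the connection socket, so dccp_ipv6 can supply its own handlers. A sketch of the relevant inet_connection_sock_af_ops members, with signatures matching the calls used above (other members elided):

	struct inet_connection_sock_af_ops {
		int	(*queue_xmit)(struct sk_buff *skb, int ipfragok);
		void	(*send_check)(struct sock *sk, int len,
				      struct sk_buff *skb);
		int	(*rebuild_header)(struct sock *sk);
		/* ... */
	};

dccp_send_reset() treats NET_XMIT_CN as success because it means the packet was queued despite local congestion, the same convention TCP uses.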
@@ -405,12 +436,12 @@ int dccp_connect(struct sock *sk)
 
 	dccp_connect_init(sk);
 
-	skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
+	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
 	if (unlikely(skb == NULL))
 		return -ENOBUFS;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_DCCP_HEADER);
+	skb_reserve(skb, sk->sk_prot->max_header);
 
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
 	skb->csum = 0;
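dccp_connect() now sizes the Request skb from the same bound; the +15 alignment slack of the old MAX_DCCP_HEADER + 15 is no longer added. For context, a sketch of how the function goes on to use the skb, per the unmodified remainder of dccp_connect() in this tree (not part of the hunk):

	dccp_skb_entail(sk, skb);	/* keep a copy for retransmission */
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Retransmit the REQUEST until a RESPONSE arrives. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto, DCCP_RTO_MAX);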
@@ -431,7 +462,8 @@ void dccp_send_ack(struct sock *sk)
 {
 	/* If we have been reset, we may not send again. */
 	if (sk->sk_state != DCCP_CLOSED) {
-		struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);
+		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
+						GFP_ATOMIC);
 
 		if (skb == NULL) {
 			inet_csk_schedule_ack(sk);
@@ -443,7 +475,7 @@ void dccp_send_ack(struct sock *sk)
 		}
 
 		/* Reserve space for headers */
-		skb_reserve(skb, MAX_DCCP_HEADER);
+		skb_reserve(skb, sk->sk_prot->max_header);
 		skb->csum = 0;
 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
 		dccp_transmit_skb(sk, skb);
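If the ack skb cannot be allocated, dccp_send_ack() falls back to the delayed-ack timer rather than losing the ack; the hunk above keeps that path intact. A sketch of the fallback, assuming the surrounding code of this version:

	if (skb == NULL) {
		inet_csk_schedule_ack(sk);
		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  TCP_DELACK_MAX, DCCP_RTO_MAX);
		return;
	}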
@@ -490,14 +522,14 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
 	 * dccp_transmit_skb() will set the ownership to this
 	 * sock.
 	 */
-	struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);
+	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
 
 	if (skb == NULL)
 		/* FIXME: how to make sure the sync is sent? */
 		return;
 
 	/* Reserve space for headers and prepare control bits. */
-	skb_reserve(skb, MAX_DCCP_HEADER);
+	skb_reserve(skb, sk->sk_prot->max_header);
 	skb->csum = 0;
 	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
 	DCCP_SKB_CB(skb)->dccpd_seq = seq;
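dccp_send_sync() builds either a Sync or a SyncAck from the caller-supplied sequence number; the export added in the next hunk presumably lets the split-out ipv6 code call it. A hypothetical call site, answering a packet that fell outside the expected sequence window (RFC 4340, section 7.5.4):

	dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);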
@@ -505,6 +537,8 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
 	dccp_transmit_skb(sk, skb);
 }
 
+EXPORT_SYMBOL_GPL(dccp_send_sync);
+
 /*
  * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
  * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under