-rw-r--r--	net/dccp/ipv4.c   |  11 ++++++-----
-rw-r--r--	net/dccp/ipv6.c   |  23 ++++++++---------------
-rw-r--r--	net/dccp/output.c |  29 ++++++++++++++---------------
3 files changed, 28 insertions(+), 35 deletions(-)
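
In short, every DCCP control-skb call site used to compute its own headroom (MAX_DCCP_HEADER + 15, or MAX_HEADER + sizeof(struct ipv6hdr) + the DCCP header length); after this patch they all use the single per-protocol bound sk->sk_prot->max_header, and the header-length locals become u32. A minimal sketch of the allocation pattern the patch converges on, assuming max_header is already sized to cover the link, network and DCCP headers plus options:

	/* Illustrative sketch only, not part of the diff below. */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL)
		return;

	/*
	 * Keep the whole buffer as headroom; the DCCP header is added
	 * later with skb_push() and the lower layers prepend theirs.
	 */
	skb_reserve(skb, sk->sk_prot->max_header);
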
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 7098f1055f4a..64b7f8bda42d 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -201,7 +201,7 @@ static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb)
 {
 	int err;
 	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
-	const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
+	const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_ack_bits);
 	struct sk_buff *skb;
@@ -209,12 +209,12 @@ static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb)
 	if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
 		return;
 
-	skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);
+	skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header, GFP_ATOMIC);
 	if (skb == NULL)
 		return;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_DCCP_HEADER);
+	skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header);
 
 	skb->dst = dst_clone(rxskb->dst);
 
@@ -715,12 +715,13 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
 	if (dst == NULL)
 		return;
 
-	skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);
+	skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header,
+			GFP_ATOMIC);
 	if (skb == NULL)
 		goto out;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_DCCP_HEADER);
+	skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header);
 	skb->dst = dst_clone(dst);
 
 	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index f28f38fd0134..0f328c753c57 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -514,7 +514,7 @@ static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
 static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
 {
 	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
-	const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
+	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
 	struct sk_buff *skb;
@@ -527,18 +527,12 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
 	if (!ipv6_unicast_destination(rxskb))
 		return;
 
-	/*
-	 * We need to grab some memory, and put together an RST,
-	 * and then put it into the queue to be sent.
-	 */
-
-	skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
-			dccp_hdr_reset_len, GFP_ATOMIC);
+	skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
+			GFP_ATOMIC);
 	if (skb == NULL)
 		return;
 
-	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
-		    dccp_hdr_reset_len);
+	skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
 
 	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
 	dh = dccp_hdr(skb);
@@ -590,18 +584,17 @@ static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb)
 {
 	struct flowi fl;
 	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
-	const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
+	const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_ack_bits);
 	struct sk_buff *skb;
 
-	skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
-			dccp_hdr_ack_len, GFP_ATOMIC);
+	skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
+			GFP_ATOMIC);
 	if (skb == NULL)
 		return;
 
-	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
-		    dccp_hdr_ack_len);
+	skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
 
 	skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
 	dh = dccp_hdr(skb);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 6f3a5f02a39a..2975e3d7a48c 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -49,7 +49,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 		struct dccp_hdr *dh;
 		/* XXX For now we're using only 48 bits sequence numbers */
-		const int dccp_header_size = sizeof(*dh) +
+		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
 		int err, set_ack = 1;
@@ -279,17 +279,16 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
 {
 	struct dccp_hdr *dh;
 	struct dccp_request_sock *dreq;
-	const int dccp_header_size = sizeof(struct dccp_hdr) +
+	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
-	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
-					   dccp_header_size, 1,
+	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);
+	skb_reserve(skb, sk->sk_prot->max_header);
 
 	skb->dst = dst_clone(dst);
 	skb->csum = 0;
@@ -326,17 +325,16 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
 {
 	struct dccp_hdr *dh;
 	struct dccp_sock *dp = dccp_sk(sk);
-	const int dccp_header_size = sizeof(struct dccp_hdr) +
+	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
-	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
-					   dccp_header_size, 1,
+	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);
+	skb_reserve(skb, sk->sk_prot->max_header);
 
 	skb->dst = dst_clone(dst);
 	skb->csum = 0;
@@ -426,12 +424,12 @@ int dccp_connect(struct sock *sk)
 
 	dccp_connect_init(sk);
 
-	skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
+	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
 	if (unlikely(skb == NULL))
 		return -ENOBUFS;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_DCCP_HEADER);
+	skb_reserve(skb, sk->sk_prot->max_header);
 
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
 	skb->csum = 0;
@@ -452,7 +450,8 @@ void dccp_send_ack(struct sock *sk)
 {
 	/* If we have been reset, we may not send again. */
 	if (sk->sk_state != DCCP_CLOSED) {
-		struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);
+		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
+						GFP_ATOMIC);
 
 		if (skb == NULL) {
 			inet_csk_schedule_ack(sk);
@@ -464,7 +463,7 @@ void dccp_send_ack(struct sock *sk)
 		}
 
 		/* Reserve space for headers */
-		skb_reserve(skb, MAX_DCCP_HEADER);
+		skb_reserve(skb, sk->sk_prot->max_header);
 		skb->csum = 0;
 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
 		dccp_transmit_skb(sk, skb);
@@ -511,14 +510,14 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
 	 * dccp_transmit_skb() will set the ownership to this
 	 * sock.
 	 */
-	struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);
+	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
 
 	if (skb == NULL)
 		/* FIXME: how to make sure the sync is sent? */
 		return;
 
 	/* Reserve space for headers and prepare control bits. */
-	skb_reserve(skb, MAX_DCCP_HEADER);
+	skb_reserve(skb, sk->sk_prot->max_header);
 	skb->csum = 0;
 	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
 	DCCP_SKB_CB(skb)->dccpd_seq = seq;
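
Not visible in this diff: the bound itself comes from the max_header member of the DCCP struct proto instances behind these sockets, so it must be at least as large as the old open-coded sums. A rough sketch of such a definition, assuming the usual proto layout; dccp_example_prot is a made-up name for illustration and the real definitions live elsewhere in net/dccp/:

	#include <net/sock.h>	/* struct proto */
	#include <linux/dccp.h>	/* MAX_DCCP_HEADER (assumed location) */

	/* Hypothetical illustration, not part of this patch. */
	static struct proto dccp_example_prot = {
		.name		= "DCCP",
		.max_header	= MAX_DCCP_HEADER,	/* worst-case per-skb headroom */
		/* .owner, .obj_size and the remaining ops omitted for brevity */
	};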
