Diffstat (limited to 'net/dccp/ipv4.c')
-rw-r--r--	net/dccp/ipv4.c	537
1 file changed, 228 insertions, 309 deletions
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index e08e7688a263..ff81679c9f17 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -113,13 +113,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	/* OK, now commit destination to socket. */
 	sk_setup_caps(sk, &rt->u.dst);
 
-	dp->dccps_gar =
-		dp->dccps_iss = secure_dccp_sequence_number(inet->saddr,
-							    inet->daddr,
-							    inet->sport,
-							    usin->sin_port);
-	dccp_update_gss(sk, dp->dccps_iss);
-
+	dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr,
+						    inet->sport, inet->dport);
 	inet->id = dp->dccps_iss ^ jiffies;
 
 	err = dccp_connect(sk);
@@ -193,86 +188,6 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
 	} /* else let the usual retransmit timer handle it */
 }
 
-static void dccp_v4_reqsk_send_ack(struct sk_buff *rxskb,
-				   struct request_sock *req)
-{
-	int err;
-	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
-	const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
-				     sizeof(struct dccp_hdr_ext) +
-				     sizeof(struct dccp_hdr_ack_bits);
-	struct sk_buff *skb;
-
-	if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
-		return;
-
-	skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header, GFP_ATOMIC);
-	if (skb == NULL)
-		return;
-
-	/* Reserve space for headers. */
-	skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header);
-
-	skb->dst = dst_clone(rxskb->dst);
-
-	skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_hdr_ack_len);
-
-	/* Build DCCP header and checksum it. */
-	dh->dccph_type	= DCCP_PKT_ACK;
-	dh->dccph_sport = rxdh->dccph_dport;
-	dh->dccph_dport = rxdh->dccph_sport;
-	dh->dccph_doff	= dccp_hdr_ack_len / 4;
-	dh->dccph_x	= 1;
-
-	dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
-	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
-			 DCCP_SKB_CB(rxskb)->dccpd_seq);
-
-	bh_lock_sock(dccp_v4_ctl_socket->sk);
-	err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk,
-				    rxskb->nh.iph->daddr,
-				    rxskb->nh.iph->saddr, NULL);
-	bh_unlock_sock(dccp_v4_ctl_socket->sk);
-
-	if (err == NET_XMIT_CN || err == 0) {
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
-	}
-}
-
-static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
-				 struct dst_entry *dst)
-{
-	int err = -1;
-	struct sk_buff *skb;
-
-	/* First, grab a route. */
-
-	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
-		goto out;
-
-	skb = dccp_make_response(sk, dst, req);
-	if (skb != NULL) {
-		const struct inet_request_sock *ireq = inet_rsk(req);
-		struct dccp_hdr *dh = dccp_hdr(skb);
-
-		dh->dccph_checksum = dccp_v4_checksum(skb, ireq->loc_addr,
-						      ireq->rmt_addr);
-		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-					    ireq->rmt_addr,
-					    ireq->opt);
-		if (err == NET_XMIT_CN)
-			err = 0;
-	}
-
-out:
-	dst_release(dst);
-	return err;
-}
-
 /*
  * This routine is called by the ICMP module when it gets some sort of error
  * condition. If err < 0 then the socket should be closed and the error
@@ -329,7 +244,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	seq = dccp_hdr_seq(skb);
 	if (sk->sk_state != DCCP_LISTEN &&
 	    !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
-		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
+		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -429,19 +344,24 @@ out:
 	sock_put(sk);
 }
 
-/* This routine computes an IPv4 DCCP checksum. */
-void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
+static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
+					  __be32 src, __be32 dst)
+{
+	return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
+}
+
+void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb)
 {
 	const struct inet_sock *inet = inet_sk(sk);
 	struct dccp_hdr *dh = dccp_hdr(skb);
 
-	dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr, inet->daddr);
+	dccp_csum_outgoing(skb);
+	dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->saddr, inet->daddr);
 }
 
 EXPORT_SYMBOL_GPL(dccp_v4_send_check);
 
-static inline u64 dccp_v4_init_sequence(const struct sock *sk,
-					const struct sk_buff *skb)
+static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
 {
 	return secure_dccp_sequence_number(skb->nh.iph->daddr,
 					   skb->nh.iph->saddr,
@@ -449,95 +369,6 @@ static inline u64 dccp_v4_init_sequence(const struct sock *sk,
 					   dccp_hdr(skb)->dccph_sport);
 }
 
-static struct request_sock_ops dccp_request_sock_ops;
-
-int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
-{
-	struct inet_request_sock *ireq;
-	struct dccp_sock dp;
-	struct request_sock *req;
-	struct dccp_request_sock *dreq;
-	const __be32 saddr = skb->nh.iph->saddr;
-	const __be32 daddr = skb->nh.iph->daddr;
-	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
-	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
-	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
-
-	/* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */
-	if (((struct rtable *)skb->dst)->rt_flags &
-	    (RTCF_BROADCAST | RTCF_MULTICAST)) {
-		reset_code = DCCP_RESET_CODE_NO_CONNECTION;
-		goto drop;
-	}
-
-	if (dccp_bad_service_code(sk, service)) {
-		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
-		goto drop;
-	}
-	/*
-	 * TW buckets are converted to open requests without
-	 * limitations, they conserve resources and peer is
-	 * evidently real one.
-	 */
-	if (inet_csk_reqsk_queue_is_full(sk))
-		goto drop;
-
-	/*
-	 * Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
-		goto drop;
-
-	req = reqsk_alloc(&dccp_request_sock_ops);
-	if (req == NULL)
-		goto drop;
-
-	if (dccp_parse_options(sk, skb))
-		goto drop_and_free;
-
-	dccp_openreq_init(req, &dp, skb);
-
-	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_free;
-
-	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
-	req->rcv_wnd	= dccp_feat_default_sequence_window;
-	ireq->opt	= NULL;
-
-	/*
-	 * Step 3: Process LISTEN state
-	 *
-	 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
-	 *
-	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
-	 * dccp_create_openreq_child.
-	 */
-	dreq = dccp_rsk(req);
-	dreq->dreq_isr	   = dcb->dccpd_seq;
-	dreq->dreq_iss	   = dccp_v4_init_sequence(sk, skb);
-	dreq->dreq_service = service;
-
-	if (dccp_v4_send_response(sk, req, NULL))
-		goto drop_and_free;
-
-	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
-	return 0;
-
-drop_and_free:
-	reqsk_free(req);
-drop:
-	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
-	dcb->dccpd_reset_code = reset_code;
-	return -1;
-}
-
-EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
-
 /*
  * The three way handshake has completed - we got a valid ACK or DATAACK -
  * now create the new socket.
@@ -623,47 +454,6 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 	return sk;
 }
 
-int dccp_v4_checksum(const struct sk_buff *skb, const __be32 saddr,
-		     const __be32 daddr)
-{
-	const struct dccp_hdr* dh = dccp_hdr(skb);
-	int checksum_len;
-	u32 tmp;
-
-	if (dh->dccph_cscov == 0)
-		checksum_len = skb->len;
-	else {
-		checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
-		checksum_len = checksum_len < skb->len ? checksum_len :
-							 skb->len;
-	}
-
-	tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
-	return csum_tcpudp_magic(saddr, daddr, checksum_len,
-				 IPPROTO_DCCP, tmp);
-}
-
-EXPORT_SYMBOL_GPL(dccp_v4_checksum);
-
-static int dccp_v4_verify_checksum(struct sk_buff *skb,
-				   const __be32 saddr, const __be32 daddr)
-{
-	struct dccp_hdr *dh = dccp_hdr(skb);
-	int checksum_len;
-	u32 tmp;
-
-	if (dh->dccph_cscov == 0)
-		checksum_len = skb->len;
-	else {
-		checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
-		checksum_len = checksum_len < skb->len ? checksum_len :
-							 skb->len;
-	}
-	tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
-	return csum_tcpudp_magic(saddr, daddr, checksum_len,
-				 IPPROTO_DCCP, tmp) == 0 ? 0 : -1;
-}
-
 static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
 					   struct sk_buff *skb)
 {
@@ -689,7 +479,37 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
 	return &rt->u.dst;
 }
 
-static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
+static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
+				 struct dst_entry *dst)
+{
+	int err = -1;
+	struct sk_buff *skb;
+
+	/* First, grab a route. */
+
+	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
+		goto out;
+
+	skb = dccp_make_response(sk, dst, req);
+	if (skb != NULL) {
+		const struct inet_request_sock *ireq = inet_rsk(req);
+		struct dccp_hdr *dh = dccp_hdr(skb);
+
+		dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
+							 ireq->rmt_addr);
+		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
+					    ireq->rmt_addr,
+					    ireq->opt);
+		err = net_xmit_eval(err);
+	}
+
+out:
+	dst_release(dst);
+	return err;
+}
+
+static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 {
 	int err;
 	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
@@ -698,7 +518,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
 				 sizeof(struct dccp_hdr_reset);
 	struct sk_buff *skb;
 	struct dst_entry *dst;
-	u64 seqno;
+	u64 seqno = 0;
 
 	/* Never send a reset in response to a reset. */
 	if (rxdh->dccph_type == DCCP_PKT_RESET)
@@ -720,9 +540,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
 	skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header);
 	skb->dst = dst_clone(dst);
 
-	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_hdr_reset_len);
+	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
 
 	/* Build DCCP header and checksum it. */
 	dh->dccph_type	= DCCP_PKT_RESET;
@@ -734,16 +552,15 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
 					DCCP_SKB_CB(rxskb)->dccpd_reset_code;
 
 	/* See "8.3.1. Abnormal Termination" in RFC 4340 */
-	seqno = 0;
 	if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 		dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
 
 	dccp_hdr_set_seq(dh, seqno);
-	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
-			 DCCP_SKB_CB(rxskb)->dccpd_seq);
+	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq);
 
-	dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr,
-					      rxskb->nh.iph->daddr);
+	dccp_csum_outgoing(skb);
+	dh->dccph_checksum = dccp_v4_csum_finish(skb, rxskb->nh.iph->saddr,
+						 rxskb->nh.iph->daddr);
 
 	bh_lock_sock(dccp_v4_ctl_socket->sk);
 	err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk,
@@ -751,7 +568,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
 				    rxskb->nh.iph->saddr, NULL);
 	bh_unlock_sock(dccp_v4_ctl_socket->sk);
 
-	if (err == NET_XMIT_CN || err == 0) {
+	if (net_xmit_eval(err) == 0) {
 		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
 		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
 	}
@@ -759,6 +576,103 @@ out:
 	dst_release(dst);
 }
 
+static void dccp_v4_reqsk_destructor(struct request_sock *req)
+{
+	kfree(inet_rsk(req)->opt);
+}
+
+static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
+	.family		= PF_INET,
+	.obj_size	= sizeof(struct dccp_request_sock),
+	.rtx_syn_ack	= dccp_v4_send_response,
+	.send_ack	= dccp_reqsk_send_ack,
+	.destructor	= dccp_v4_reqsk_destructor,
+	.send_reset	= dccp_v4_ctl_send_reset,
+};
+
+int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+{
+	struct inet_request_sock *ireq;
+	struct request_sock *req;
+	struct dccp_request_sock *dreq;
+	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
+	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
+	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
+
+	/* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */
+	if (((struct rtable *)skb->dst)->rt_flags &
+	    (RTCF_BROADCAST | RTCF_MULTICAST)) {
+		reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+		goto drop;
+	}
+
+	if (dccp_bad_service_code(sk, service)) {
+		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
+		goto drop;
+	}
+	/*
+	 * TW buckets are converted to open requests without
+	 * limitations, they conserve resources and peer is
+	 * evidently real one.
+	 */
+	if (inet_csk_reqsk_queue_is_full(sk))
+		goto drop;
+
+	/*
+	 * Accept backlog is full. If we have already queued enough
+	 * of warm entries in syn queue, drop request. It is better than
+	 * clogging syn queue with openreqs with exponentially increasing
+	 * timeout.
+	 */
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+		goto drop;
+
+	req = reqsk_alloc(&dccp_request_sock_ops);
+	if (req == NULL)
+		goto drop;
+
+	if (dccp_parse_options(sk, skb))
+		goto drop_and_free;
+
+	dccp_reqsk_init(req, skb);
+
+	if (security_inet_conn_request(sk, skb, req))
+		goto drop_and_free;
+
+	ireq = inet_rsk(req);
+	ireq->loc_addr = skb->nh.iph->daddr;
+	ireq->rmt_addr = skb->nh.iph->saddr;
+	ireq->opt	= NULL;
+
+	/*
+	 * Step 3: Process LISTEN state
+	 *
+	 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
+	 *
+	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
+	 * dccp_create_openreq_child.
+	 */
+	dreq = dccp_rsk(req);
+	dreq->dreq_isr	   = dcb->dccpd_seq;
+	dreq->dreq_iss	   = dccp_v4_init_sequence(skb);
+	dreq->dreq_service = service;
+
+	if (dccp_v4_send_response(sk, req, NULL))
+		goto drop_and_free;
+
+	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	return 0;
+
+drop_and_free:
+	reqsk_free(req);
+drop:
+	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+	dcb->dccpd_reset_code = reset_code;
+	return -1;
+}
+
+EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
+
 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_hdr *dh = dccp_hdr(skb);
@@ -771,24 +685,23 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	/*
 	 * Step 3: Process LISTEN state
-	 *	 If S.state == LISTEN,
-	 *	   If P.type == Request or P contains a valid Init Cookie
-	 *	   option,
-	 *	      * Must scan the packet's options to check for an Init
-	 *		Cookie. Only the Init Cookie is processed here,
-	 *		however; other options are processed in Step 8. This
-	 *		scan need only be performed if the endpoint uses Init
-	 *		Cookies *
-	 *	      * Generate a new socket and switch to that socket *
-	 *	      Set S := new socket for this port pair
-	 *	      S.state = RESPOND
-	 *	      Choose S.ISS (initial seqno) or set from Init Cookie
-	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
-	 *	      Continue with S.state == RESPOND
-	 *	      * A Response packet will be generated in Step 11 *
-	 *	   Otherwise,
-	 *	      Generate Reset(No Connection) unless P.type == Reset
-	 *	      Drop packet and return
+	 *	 If P.type == Request or P contains a valid Init Cookie option,
+	 *	      (* Must scan the packet's options to check for Init
+	 *		 Cookies.  Only Init Cookies are processed here,
+	 *		 however; other options are processed in Step 8.  This
+	 *		 scan need only be performed if the endpoint uses Init
+	 *		 Cookies *)
+	 *	      (* Generate a new socket and switch to that socket *)
+	 *	      Set S := new socket for this port pair
+	 *	      S.state = RESPOND
+	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
+	 *	      Initialize S.GAR := S.ISS
+	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
+	 *	      Continue with S.state == RESPOND
+	 *	      (* A Response packet will be generated in Step 11 *)
+	 *	 Otherwise,
+	 *	      Generate Reset(No Connection) unless P.type == Reset
+	 *	      Drop packet and return
 	 *
 	 * NOTE: the check for the packet types is done in
 	 * dccp_rcv_state_process
@@ -811,7 +724,7 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 
 reset:
-	dccp_v4_ctl_send_reset(skb);
+	dccp_v4_ctl_send_reset(sk, skb);
 discard:
 	kfree_skb(skb);
 	return 0;
@@ -819,60 +732,74 @@ discard:
 
 EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
 
+/**
+ * dccp_invalid_packet - check for malformed packets
+ * Implements RFC 4340, 8.5: Step 1: Check header basics
+ * Packets that fail these checks are ignored and do not receive Resets.
+ */
 int dccp_invalid_packet(struct sk_buff *skb)
 {
 	const struct dccp_hdr *dh;
+	unsigned int cscov;
 
 	if (skb->pkt_type != PACKET_HOST)
 		return 1;
 
+	/* If the packet is shorter than 12 bytes, drop packet and return */
 	if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: pskb_may_pull failed\n");
+		DCCP_WARN("pskb_may_pull failed\n");
 		return 1;
 	}
 
 	dh = dccp_hdr(skb);
 
-	/* If the packet type is not understood, drop packet and return */
+	/* If P.type is not understood, drop packet and return */
 	if (dh->dccph_type >= DCCP_PKT_INVALID) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: invalid packet type\n");
+		DCCP_WARN("invalid packet type\n");
 		return 1;
 	}
 
 	/*
-	 * If P.Data Offset is too small for packet type, or too large for
-	 * packet, drop packet and return
+	 * If P.Data Offset is too small for packet type, drop packet and return
 	 */
 	if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "
-					    "too small 1\n",
-			       dh->dccph_doff);
+		DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
 		return 1;
 	}
-
+	/*
+	 * If P.Data Offset is too large for packet, drop packet and return
+	 */
 	if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "
-					    "too small 2\n",
-			       dh->dccph_doff);
+		DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
 		return 1;
 	}
 
-	dh = dccp_hdr(skb);
-
 	/*
 	 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
 	 * has short sequence numbers), drop packet and return
 	 */
-	if (dh->dccph_x == 0 &&
-	    dh->dccph_type != DCCP_PKT_DATA &&
-	    dh->dccph_type != DCCP_PKT_ACK &&
-	    dh->dccph_type != DCCP_PKT_DATAACK) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.type (%s) not Data, Ack "
-					    "nor DataAck and P.X == 0\n",
-			       dccp_packet_name(dh->dccph_type));
+	if (dh->dccph_type >= DCCP_PKT_DATA &&
+	    dh->dccph_type <= DCCP_PKT_DATAACK && dh->dccph_x == 0) {
+		DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n",
+			  dccp_packet_name(dh->dccph_type));
 		return 1;
 	}
 
+	/*
+	 * If P.CsCov is too large for the packet size, drop packet and return.
+	 * This must come _before_ checksumming (not as RFC 4340 suggests).
+	 */
+	cscov = dccp_csum_coverage(skb);
+	if (cscov > skb->len) {
+		DCCP_WARN("P.CsCov %u exceeds packet length %d\n",
+			  dh->dccph_cscov, skb->len);
+		return 1;
+	}
+
+	/* If header checksum is incorrect, drop packet and return.
+	 * (This step is completed in the AF-dependent functions.) */
+	skb->csum = skb_checksum(skb, 0, cscov, 0);
+
 	return 0;
 }
 
@@ -883,17 +810,16 @@ static int dccp_v4_rcv(struct sk_buff *skb)
 {
 	const struct dccp_hdr *dh;
 	struct sock *sk;
+	int min_cov;
 
-	/* Step 1: Check header basics: */
+	/* Step 1: Check header basics */
 
 	if (dccp_invalid_packet(skb))
 		goto discard_it;
 
-	/* If the header checksum is incorrect, drop packet and return */
-	if (dccp_v4_verify_checksum(skb, skb->nh.iph->saddr,
-				    skb->nh.iph->daddr) < 0) {
-		LIMIT_NETDEBUG(KERN_WARNING "%s: incorrect header checksum\n",
-			       __FUNCTION__);
+	/* Step 1: If header checksum is incorrect, drop packet and return */
+	if (dccp_v4_csum_finish(skb, skb->nh.iph->saddr, skb->nh.iph->daddr)) {
+		DCCP_WARN("dropped packet with invalid checksum\n");
 		goto discard_it;
 	}
 
@@ -915,8 +841,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
 		dccp_pr_debug_cat("\n");
 	} else {
 		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
-		dccp_pr_debug_cat(", ack=%llu\n",
-				  (unsigned long long)
+		dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long)
 				  DCCP_SKB_CB(skb)->dccpd_ack_seq);
 	}
 
@@ -930,8 +855,6 @@ static int dccp_v4_rcv(struct sk_buff *skb)
 	/*
 	 * Step 2:
 	 *	If no socket ...
-	 *		Generate Reset(No Connection) unless P.type == Reset
-	 *		Drop packet and return
 	 */
 	if (sk == NULL) {
 		dccp_pr_debug("failed to look up flow ID in table and "
@@ -945,45 +868,55 @@ static int dccp_v4_rcv(struct sk_buff *skb)
 	 *		Generate Reset(No Connection) unless P.type == Reset
 	 *		Drop packet and return
 	 */
-
 	if (sk->sk_state == DCCP_TIME_WAIT) {
-		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: "
-			      "do_time_wait\n");
-		goto do_time_wait;
+		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
+		inet_twsk_put(inet_twsk(sk));
+		goto no_dccp_socket;
+	}
+
+	/*
+	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
+	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
+	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
+	 */
+	min_cov = dccp_sk(sk)->dccps_pcrlen;
+	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
+		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
+			      dh->dccph_cscov, min_cov);
+		/* FIXME: "Such packets SHOULD be reported using Data Dropped
+		 *         options (Section 11.7) with Drop Code 0, Protocol
+		 *         Constraints." */
+		goto discard_and_relse;
 	}
 
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 	nf_reset(skb);
 
-	return sk_receive_skb(sk, skb);
+	return sk_receive_skb(sk, skb, 1);
 
 no_dccp_socket:
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto discard_it;
 	/*
 	 * Step 2:
+	 *	If no socket ...
 	 *		Generate Reset(No Connection) unless P.type == Reset
 	 *		Drop packet and return
 	 */
 	if (dh->dccph_type != DCCP_PKT_RESET) {
 		DCCP_SKB_CB(skb)->dccpd_reset_code =
 					DCCP_RESET_CODE_NO_CONNECTION;
-		dccp_v4_ctl_send_reset(skb);
+		dccp_v4_ctl_send_reset(sk, skb);
 	}
 
 discard_it:
-	/* Discard frame. */
 	kfree_skb(skb);
 	return 0;
 
 discard_and_relse:
 	sock_put(sk);
 	goto discard_it;
-
-do_time_wait:
-	inet_twsk_put(inet_twsk(sk));
-	goto no_dccp_socket;
 }
 
 static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
@@ -1017,20 +950,6 @@ static int dccp_v4_init_sock(struct sock *sk)
 	return err;
 }
 
-static void dccp_v4_reqsk_destructor(struct request_sock *req)
-{
-	kfree(inet_rsk(req)->opt);
-}
-
-static struct request_sock_ops dccp_request_sock_ops = {
-	.family		= PF_INET,
-	.obj_size	= sizeof(struct dccp_request_sock),
-	.rtx_syn_ack	= dccp_v4_send_response,
-	.send_ack	= dccp_v4_reqsk_send_ack,
-	.destructor	= dccp_v4_reqsk_destructor,
-	.send_reset	= dccp_v4_ctl_send_reset,
-};
-
 static struct timewait_sock_ops dccp_timewait_sock_ops = {
 	.twsk_obj_size	= sizeof(struct inet_timewait_sock),
 };