author    Gerrit Renker <gerrit@erg.abdn.ac.uk>  2006-11-10 09:52:36 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2006-12-03 00:21:59 -0500
commit    3d2fe62b8d8522722c4fe46b8af13520b73848c4 (patch)
tree      8189632c135a89943a81dc27b5728ede25a295bc /net/dccp/ipv4.c
parent    8a73cd09d96aa01743316657fc4e6864fe79b703 (diff)
[DCCPv4]: remove forward declarations in ipv4.c
This relates to Arnaldo's announcement in
http://www.mail-archive.com/dccp@vger.kernel.org/msg00604.html

Originally this had been part of the Oops fix and is a revised variant of
http://www.mail-archive.com/dccp@vger.kernel.org/msg00598.html

No code change, merely reshuffling, with the particular objective of having
all request_sock_ops closer together for more clarity.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
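To see the point of the reshuffle, here is a minimal, compilable C sketch
(hypothetical names, not the kernel code) of the pattern the patch applies:
once the handler functions and the ops table that points to them are defined
above their first user, the forward declaration that the old layout needed
("static struct request_sock_ops dccp_request_sock_ops;") simply goes away.

#include <stdio.h>

struct example_ops {
	int (*send_response)(int id);
};

/* Handler defined first ... */
static int example_send_response(int id)
{
	printf("response for request %d\n", id);
	return 0;
}

/* ... so the table can reference it directly; and because the table
 * itself now precedes its only user below, no forward declaration
 * ("static struct example_ops example_ops;") is required. */
static struct example_ops example_ops = {
	.send_response = example_send_response,
};

static int example_conn_request(int id)
{
	return example_ops.send_response(id);
}

int main(void)
{
	return example_conn_request(42);
}

The same define-before-use ordering is what the hunks below carry out:
dccp_v4_send_response, dccp_v4_reqsk_destructor and dccp_request_sock_ops
all move ahead of dccp_v4_conn_request, their first user.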
Diffstat (limited to 'net/dccp/ipv4.c')
-rw-r--r--  net/dccp/ipv4.c  266
1 file changed, 132 insertions(+), 134 deletions(-)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8dd9f5aa27a2..ed6202652bcc 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -193,37 +193,6 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
 	} /* else let the usual retransmit timer handle it */
 }
 
-static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
-				 struct dst_entry *dst)
-{
-	int err = -1;
-	struct sk_buff *skb;
-
-	/* First, grab a route. */
-
-	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
-		goto out;
-
-	skb = dccp_make_response(sk, dst, req);
-	if (skb != NULL) {
-		const struct inet_request_sock *ireq = inet_rsk(req);
-		struct dccp_hdr *dh = dccp_hdr(skb);
-
-		dh->dccph_checksum = dccp_v4_checksum(skb, ireq->loc_addr,
-						      ireq->rmt_addr);
-		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-					    ireq->rmt_addr,
-					    ireq->opt);
-		if (err == NET_XMIT_CN)
-			err = 0;
-	}
-
-out:
-	dst_release(dst);
-	return err;
-}
-
 /*
  * This routine is called by the ICMP module when it gets some sort of error
  * condition. If err < 0 then the socket should be closed and the error
@@ -400,95 +369,6 @@ static inline u64 dccp_v4_init_sequence(const struct sock *sk,
 				    dccp_hdr(skb)->dccph_sport);
 }
 
-static struct request_sock_ops dccp_request_sock_ops;
-
-int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
-{
-	struct inet_request_sock *ireq;
-	struct dccp_sock dp;
-	struct request_sock *req;
-	struct dccp_request_sock *dreq;
-	const __be32 saddr = skb->nh.iph->saddr;
-	const __be32 daddr = skb->nh.iph->daddr;
-	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
-	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
-	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
-
-	/* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */
-	if (((struct rtable *)skb->dst)->rt_flags &
-	    (RTCF_BROADCAST | RTCF_MULTICAST)) {
-		reset_code = DCCP_RESET_CODE_NO_CONNECTION;
-		goto drop;
-	}
-
-	if (dccp_bad_service_code(sk, service)) {
-		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
-		goto drop;
-	}
-	/*
-	 * TW buckets are converted to open requests without
-	 * limitations, they conserve resources and peer is
-	 * evidently real one.
-	 */
-	if (inet_csk_reqsk_queue_is_full(sk))
-		goto drop;
-
-	/*
-	 * Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
-		goto drop;
-
-	req = reqsk_alloc(&dccp_request_sock_ops);
-	if (req == NULL)
-		goto drop;
-
-	if (dccp_parse_options(sk, skb))
-		goto drop_and_free;
-
-	dccp_openreq_init(req, &dp, skb);
-
-	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_free;
-
-	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
-	req->rcv_wnd = dccp_feat_default_sequence_window;
-	ireq->opt = NULL;
-
-	/*
-	 * Step 3: Process LISTEN state
-	 *
-	 *	Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
-	 *
-	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
-	 * dccp_create_openreq_child.
-	 */
-	dreq = dccp_rsk(req);
-	dreq->dreq_isr = dcb->dccpd_seq;
-	dreq->dreq_iss = dccp_v4_init_sequence(sk, skb);
-	dreq->dreq_service = service;
-
-	if (dccp_v4_send_response(sk, req, NULL))
-		goto drop_and_free;
-
-	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
-	return 0;
-
-drop_and_free:
-	reqsk_free(req);
-drop:
-	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
-	dcb->dccpd_reset_code = reset_code;
-	return -1;
-}
-
-EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
-
 /*
  * The three way handshake has completed - we got a valid ACK or DATAACK -
  * now create the new socket.
@@ -640,6 +520,37 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
 	return &rt->u.dst;
 }
 
+static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
+				 struct dst_entry *dst)
+{
+	int err = -1;
+	struct sk_buff *skb;
+
+	/* First, grab a route. */
+
+	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
+		goto out;
+
+	skb = dccp_make_response(sk, dst, req);
+	if (skb != NULL) {
+		const struct inet_request_sock *ireq = inet_rsk(req);
+		struct dccp_hdr *dh = dccp_hdr(skb);
+
+		dh->dccph_checksum = dccp_v4_checksum(skb, ireq->loc_addr,
+						      ireq->rmt_addr);
+		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
+					    ireq->rmt_addr,
+					    ireq->opt);
+		if (err == NET_XMIT_CN)
+			err = 0;
+	}
+
+out:
+	dst_release(dst);
+	return err;
+}
+
 static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
 {
 	int err;
@@ -708,6 +619,107 @@ out:
 	dst_release(dst);
 }
 
+static void dccp_v4_reqsk_destructor(struct request_sock *req)
+{
+	kfree(inet_rsk(req)->opt);
+}
+
+static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
+	.family		= PF_INET,
+	.obj_size	= sizeof(struct dccp_request_sock),
+	.rtx_syn_ack	= dccp_v4_send_response,
+	.send_ack	= dccp_reqsk_send_ack,
+	.destructor	= dccp_v4_reqsk_destructor,
+	.send_reset	= dccp_v4_ctl_send_reset,
+};
+
+int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+{
+	struct inet_request_sock *ireq;
+	struct dccp_sock dp;
+	struct request_sock *req;
+	struct dccp_request_sock *dreq;
+	const __be32 saddr = skb->nh.iph->saddr;
+	const __be32 daddr = skb->nh.iph->daddr;
+	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
+	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
+	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
+
+	/* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */
+	if (((struct rtable *)skb->dst)->rt_flags &
+	    (RTCF_BROADCAST | RTCF_MULTICAST)) {
+		reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+		goto drop;
+	}
+
+	if (dccp_bad_service_code(sk, service)) {
+		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
+		goto drop;
+	}
+	/*
+	 * TW buckets are converted to open requests without
+	 * limitations, they conserve resources and peer is
+	 * evidently real one.
+	 */
+	if (inet_csk_reqsk_queue_is_full(sk))
+		goto drop;
+
+	/*
+	 * Accept backlog is full. If we have already queued enough
+	 * of warm entries in syn queue, drop request. It is better than
+	 * clogging syn queue with openreqs with exponentially increasing
+	 * timeout.
+	 */
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+		goto drop;
+
+	req = reqsk_alloc(&dccp_request_sock_ops);
+	if (req == NULL)
+		goto drop;
+
+	if (dccp_parse_options(sk, skb))
+		goto drop_and_free;
+
+	dccp_openreq_init(req, &dp, skb);
+
+	if (security_inet_conn_request(sk, skb, req))
+		goto drop_and_free;
+
+	ireq = inet_rsk(req);
+	ireq->loc_addr = daddr;
+	ireq->rmt_addr = saddr;
+	req->rcv_wnd = dccp_feat_default_sequence_window;
+	ireq->opt = NULL;
+
+	/*
+	 * Step 3: Process LISTEN state
+	 *
+	 *	Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
+	 *
+	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
+	 * dccp_create_openreq_child.
+	 */
+	dreq = dccp_rsk(req);
+	dreq->dreq_isr = dcb->dccpd_seq;
+	dreq->dreq_iss = dccp_v4_init_sequence(sk, skb);
+	dreq->dreq_service = service;
+
+	if (dccp_v4_send_response(sk, req, NULL))
+		goto drop_and_free;
+
+	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	return 0;
+
+drop_and_free:
+	reqsk_free(req);
+drop:
+	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+	dcb->dccpd_reset_code = reset_code;
+	return -1;
+}
+
+EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
+
 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_hdr *dh = dccp_hdr(skb);
@@ -959,20 +971,6 @@ static int dccp_v4_init_sock(struct sock *sk)
 	return err;
 }
 
-static void dccp_v4_reqsk_destructor(struct request_sock *req)
-{
-	kfree(inet_rsk(req)->opt);
-}
-
-static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
-	.family		= PF_INET,
-	.obj_size	= sizeof(struct dccp_request_sock),
-	.rtx_syn_ack	= dccp_v4_send_response,
-	.send_ack	= dccp_reqsk_send_ack,
-	.destructor	= dccp_v4_reqsk_destructor,
-	.send_reset	= dccp_v4_ctl_send_reset,
-};
-
 static struct timewait_sock_ops dccp_timewait_sock_ops = {
 	.twsk_obj_size	= sizeof(struct inet_timewait_sock),
 };