Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
 net/ipv4/tcp_minisocks.c (-rw-r--r--) | 68
 1 file changed, 34 insertions, 34 deletions
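
The hunks below are one piece of the open_request -> request_sock conversion: the SYN_RECV mini-socket state is split into a protocol-independent base struct plus INET- and TCP-level wrappers, and direct req-> field accesses are replaced by the inet_rsk()/tcp_rsk() accessors. For orientation only, here is an abbreviated C sketch of that layering and of the accessors; the field lists are trimmed and the exact layout is an assumption, not copied from include/net/request_sock.h or include/linux/tcp.h.

/*
 * Abbreviated sketch (not the verbatim kernel definitions) of the layered
 * request-sock types that the hunks below rely on.  Kernel scalar types
 * (u16, u32) are assumed; field lists are trimmed.
 */
struct request_sock_ops;                        /* per-family callbacks, see req->rsk_ops */

struct request_sock {
        struct request_sock             *dl_next;       /* SYN-table / accept-queue linkage */
        struct request_sock_ops         *rsk_ops;       /* what used to be req->class */
        u32                             window_clamp;   /* window clamp at creation time */
        u32                             rcv_wnd;        /* rcv_wnd offered first time */
        u32                             ts_recent;
        /* ... */
};

struct inet_request_sock {
        struct request_sock     req;                    /* must stay first for the cast below */
        u16                     snd_wscale : 4,
                                rcv_wscale : 4,
                                tstamp_ok  : 1,
                                sack_ok    : 1,
                                wscale_ok  : 1,
                                acked      : 1;
        u16                     rmt_port;               /* peer's port, network byte order */
        /* ... */
};

struct tcp_request_sock {
        struct inet_request_sock        req;            /* must stay first for the cast below */
        u32                             rcv_isn;        /* peer's initial sequence number */
        u32                             snt_isn;        /* our initial sequence number */
};

/* The accessors used throughout the diff: downcasts by containment. */
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
{
        return (struct inet_request_sock *)sk;
}

static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
        return (struct tcp_request_sock *)req;
}
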
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index eea1a17a9ac2..b3943e7562f3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -684,7 +684,7 @@ out:
  * Actually, we could lots of memory writes here. tp of listening
  * socket contains all necessary default parameters.
  */
-struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
+struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
 {
         /* allocate the newsk from the same slab of the master sock,
          * if not, at sk_free time we'll try to free it from the wrong
@@ -692,6 +692,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
         struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
 
         if(newsk != NULL) {
+                struct inet_request_sock *ireq = inet_rsk(req);
+                struct tcp_request_sock *treq = tcp_rsk(req);
                 struct tcp_sock *newtp;
                 struct sk_filter *filter;
 
@@ -703,7 +705,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 tcp_sk(newsk)->bind_hash = NULL;
 
                 /* Clone the TCP header template */
-                inet_sk(newsk)->dport = req->rmt_port;
+                inet_sk(newsk)->dport = ireq->rmt_port;
 
                 sock_lock_init(newsk);
                 bh_lock_sock(newsk);
@@ -739,14 +741,14 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 /* Now setup tcp_sock */
                 newtp = tcp_sk(newsk);
                 newtp->pred_flags = 0;
-                newtp->rcv_nxt = req->rcv_isn + 1;
-                newtp->snd_nxt = req->snt_isn + 1;
-                newtp->snd_una = req->snt_isn + 1;
-                newtp->snd_sml = req->snt_isn + 1;
+                newtp->rcv_nxt = treq->rcv_isn + 1;
+                newtp->snd_nxt = treq->snt_isn + 1;
+                newtp->snd_una = treq->snt_isn + 1;
+                newtp->snd_sml = treq->snt_isn + 1;
 
                 tcp_prequeue_init(newtp);
 
-                tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);
+                tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
 
                 newtp->retransmits = 0;
                 newtp->backoff = 0;
@@ -775,10 +777,10 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 tcp_set_ca_state(newtp, TCP_CA_Open);
                 tcp_init_xmit_timers(newsk);
                 skb_queue_head_init(&newtp->out_of_order_queue);
-                newtp->rcv_wup = req->rcv_isn + 1;
-                newtp->write_seq = req->snt_isn + 1;
+                newtp->rcv_wup = treq->rcv_isn + 1;
+                newtp->write_seq = treq->snt_isn + 1;
                 newtp->pushed_seq = newtp->write_seq;
-                newtp->copied_seq = req->rcv_isn + 1;
+                newtp->copied_seq = treq->rcv_isn + 1;
 
                 newtp->rx_opt.saw_tstamp = 0;
 
@@ -788,10 +790,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 newtp->probes_out = 0;
                 newtp->rx_opt.num_sacks = 0;
                 newtp->urg_data = 0;
-                newtp->listen_opt = NULL;
-                newtp->accept_queue = newtp->accept_queue_tail = NULL;
-                /* Deinitialize syn_wait_lock to trap illegal accesses. */
-                memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
+                /* Deinitialize accept_queue to trap illegal accesses. */
+                memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
 
                 /* Back to base struct sock members. */
                 newsk->sk_err = 0;
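
The hunk above no longer pokes at listen_opt, accept_queue/accept_queue_tail and syn_wait_lock individually; a single memset() of newtp->accept_queue now poisons the whole listen-side state. That only works if those fields have been bundled into one embedded object, presumably something along the lines of this abbreviated, non-authoritative sketch (rwlock_t and the listen_sock type are assumed from the kernel headers):

/*
 * Abbreviated sketch: the listen-state fields formerly scattered across
 * struct tcp_sock are assumed to live in one embedded queue object, so
 * zeroing newtp->accept_queue traps later illegal accesses to any of them.
 */
struct listen_sock;                             /* per-listener SYN hash table */

struct request_sock_queue {
        struct request_sock     *rskq_accept_head;      /* completed, not yet accept()ed */
        struct request_sock     *rskq_accept_tail;
        rwlock_t                syn_wait_lock;          /* protects the SYN table */
        struct listen_sock      *listen_opt;            /* only non-NULL while listening */
};
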
@@ -808,18 +808,18 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 newsk->sk_socket = NULL;
                 newsk->sk_sleep = NULL;
 
-                newtp->rx_opt.tstamp_ok = req->tstamp_ok;
-                if((newtp->rx_opt.sack_ok = req->sack_ok) != 0) {
+                newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
+                if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
                         if (sysctl_tcp_fack)
                                 newtp->rx_opt.sack_ok |= 2;
                 }
                 newtp->window_clamp = req->window_clamp;
                 newtp->rcv_ssthresh = req->rcv_wnd;
                 newtp->rcv_wnd = req->rcv_wnd;
-                newtp->rx_opt.wscale_ok = req->wscale_ok;
+                newtp->rx_opt.wscale_ok = ireq->wscale_ok;
                 if (newtp->rx_opt.wscale_ok) {
-                        newtp->rx_opt.snd_wscale = req->snd_wscale;
-                        newtp->rx_opt.rcv_wscale = req->rcv_wscale;
+                        newtp->rx_opt.snd_wscale = ireq->snd_wscale;
+                        newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
                 } else {
                         newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                         newtp->window_clamp = min(newtp->window_clamp, 65535U);
@@ -851,12 +851,12 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
 
 /*
  * Process an incoming packet for SYN_RECV sockets represented
- * as an open_request.
+ * as a request_sock.
  */
 
 struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
-                           struct open_request *req,
-                           struct open_request **prev)
+                           struct request_sock *req,
+                           struct request_sock **prev)
 {
         struct tcphdr *th = skb->h.th;
         struct tcp_sock *tp = tcp_sk(sk);
@@ -881,7 +881,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
         }
 
         /* Check for pure retransmitted SYN. */
-        if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
+        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
             flg == TCP_FLAG_SYN &&
             !paws_reject) {
                 /*
@@ -901,7 +901,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                  * Enforce "SYN-ACK" according to figure 8, figure 6
                  * of RFC793, fixed by RFC1122.
                  */
-                req->class->rtx_syn_ack(sk, req, NULL);
+                req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                 return NULL;
         }
 
@@ -959,7 +959,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
          * Invalid ACK: reset will be sent by listening socket
          */
         if ((flg & TCP_FLAG_ACK) &&
-            (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
+            (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
                 return sk;
 
         /* Also, it would be not so bad idea to check rcv_tsecr, which
@@ -970,10 +970,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
         /* RFC793: "first check sequence number". */
 
         if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-                                          req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
+                                          tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
                 /* Out of window: send ACK and drop. */
                 if (!(flg & TCP_FLAG_RST))
-                        req->class->send_ack(skb, req);
+                        req->rsk_ops->send_ack(skb, req);
                 if (paws_reject)
                         NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
                 return NULL;
@@ -981,12 +981,12 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 
         /* In sequence, PAWS is OK. */
 
-        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
+        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
                 req->ts_recent = tmp_opt.rcv_tsval;
 
-        if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
+        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                 /* Truncate SYN, it is out of window starting
-                   at req->rcv_isn+1. */
+                   at tcp_rsk(req)->rcv_isn + 1. */
                 flg &= ~TCP_FLAG_SYN;
         }
 
@@ -1003,8 +1003,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                 return NULL;
 
         /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-        if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
-                req->acked = 1;
+        if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+                inet_rsk(req)->acked = 1;
                 return NULL;
         }
 
@@ -1026,14 +1026,14 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 
 listen_overflow:
         if (!sysctl_tcp_abort_on_overflow) {
-                req->acked = 1;
+                inet_rsk(req)->acked = 1;
                 return NULL;
         }
 
 embryonic_reset:
         NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
         if (!(flg & TCP_FLAG_RST))
-                req->class->send_reset(skb);
+                req->rsk_ops->send_reset(skb);
 
         tcp_synq_drop(sk, req, prev);
         return NULL;
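
The other recurring change is the rename of the per-family operations table from req->class to req->rsk_ops. Judging purely by the three callbacks this diff invokes (rtx_syn_ack, send_ack, send_reset), the table plausibly looks like the sketch below; the member list is abbreviated and the signatures are inferred from the call sites, not copied from the header.

/*
 * Rough sketch of the per-family ops table reached via req->rsk_ops
 * (formerly req->class).  Only the callbacks exercised in this diff are
 * shown; the kernel types are forward-declared to keep the sketch
 * self-contained.
 */
struct sock;
struct sk_buff;
struct dst_entry;
struct request_sock;

struct request_sock_ops {
        int     family;                         /* AF_INET, AF_INET6, ... */
        int     obj_size;                       /* size of the concrete request sock */
        /* retransmit our SYN-ACK, e.g. in answer to a retransmitted SYN */
        int     (*rtx_syn_ack)(struct sock *sk, struct request_sock *req,
                               struct dst_entry *dst);
        /* ACK an out-of-window segment without creating a child socket */
        void    (*send_ack)(struct sk_buff *skb, struct request_sock *req);
        /* reset an embryonic connection */
        void    (*send_reset)(struct sk_buff *skb);
        /* ... */
};

The indirection is what keeps tcp_check_req() family-agnostic: IPv4 and IPv6 each plug in their own table.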