path: root/net/ipv4/tcp_input.c
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  375
1 file changed, 147 insertions(+), 228 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b224eb8bce8b..3e07a64ca44e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -88,12 +88,14 @@ int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
+/* rfc5961 challenge ack rate limiting */
+int sysctl_tcp_challenge_ack_limit = 100;
+
 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly = 2;
 int sysctl_tcp_frto_response __read_mostly;
-int sysctl_tcp_nometrics_save __read_mostly;
 
 int sysctl_tcp_thin_dupack __read_mostly;
 
@@ -701,7 +703,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 /* Calculate rto without backoff. This is the second half of Van Jacobson's
  * routine referred to above.
  */
-static inline void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
 {
     const struct tcp_sock *tp = tcp_sk(sk);
     /* Old crap is replaced with new one. 8)
@@ -728,109 +730,6 @@ static inline void tcp_set_rto(struct sock *sk)
     tcp_bound_rto(sk);
 }
 
-/* Save metrics learned by this TCP session.
-   This function is called only, when TCP finishes successfully
-   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
- */
-void tcp_update_metrics(struct sock *sk)
-{
-    struct tcp_sock *tp = tcp_sk(sk);
-    struct dst_entry *dst = __sk_dst_get(sk);
-
-    if (sysctl_tcp_nometrics_save)
-        return;
-
-    dst_confirm(dst);
-
-    if (dst && (dst->flags & DST_HOST)) {
-        const struct inet_connection_sock *icsk = inet_csk(sk);
-        int m;
-        unsigned long rtt;
-
-        if (icsk->icsk_backoff || !tp->srtt) {
-            /* This session failed to estimate rtt. Why?
-             * Probably, no packets returned in time.
-             * Reset our results.
-             */
-            if (!(dst_metric_locked(dst, RTAX_RTT)))
-                dst_metric_set(dst, RTAX_RTT, 0);
-            return;
-        }
-
-        rtt = dst_metric_rtt(dst, RTAX_RTT);
-        m = rtt - tp->srtt;
-
-        /* If newly calculated rtt larger than stored one,
-         * store new one. Otherwise, use EWMA. Remember,
-         * rtt overestimation is always better than underestimation.
-         */
-        if (!(dst_metric_locked(dst, RTAX_RTT))) {
-            if (m <= 0)
-                set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
-            else
-                set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
-        }
-
-        if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
-            unsigned long var;
-            if (m < 0)
-                m = -m;
-
-            /* Scale deviation to rttvar fixed point */
-            m >>= 1;
-            if (m < tp->mdev)
-                m = tp->mdev;
-
-            var = dst_metric_rtt(dst, RTAX_RTTVAR);
-            if (m >= var)
-                var = m;
-            else
-                var -= (var - m) >> 2;
-
-            set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
-        }
-
-        if (tcp_in_initial_slowstart(tp)) {
-            /* Slow start still did not finish. */
-            if (dst_metric(dst, RTAX_SSTHRESH) &&
-                !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-                (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-                dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
-            if (!dst_metric_locked(dst, RTAX_CWND) &&
-                tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-                dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
-        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
-               icsk->icsk_ca_state == TCP_CA_Open) {
-            /* Cong. avoidance phase, cwnd is reliable. */
-            if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-                dst_metric_set(dst, RTAX_SSTHRESH,
-                           max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
-            if (!dst_metric_locked(dst, RTAX_CWND))
-                dst_metric_set(dst, RTAX_CWND,
-                           (dst_metric(dst, RTAX_CWND) +
-                        tp->snd_cwnd) >> 1);
-        } else {
-            /* Else slow start did not finish, cwnd is non-sense,
-               ssthresh may be also invalid.
-             */
-            if (!dst_metric_locked(dst, RTAX_CWND))
-                dst_metric_set(dst, RTAX_CWND,
-                           (dst_metric(dst, RTAX_CWND) +
-                        tp->snd_ssthresh) >> 1);
-            if (dst_metric(dst, RTAX_SSTHRESH) &&
-                !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-                tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-                dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
-        }
-
-        if (!dst_metric_locked(dst, RTAX_REORDERING)) {
-            if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
-                tp->reordering != sysctl_tcp_reordering)
-                dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
-        }
-    }
-}
-
 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
 {
     __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -867,7 +766,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
  */
-static void tcp_disable_fack(struct tcp_sock *tp)
+void tcp_disable_fack(struct tcp_sock *tp)
 {
     /* RFC3517 uses different metric in lost marker => reset on change */
     if (tcp_is_fack(tp))
@@ -881,86 +780,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
     tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
 }
 
-/* Initialize metrics on socket. */
-
-static void tcp_init_metrics(struct sock *sk)
-{
-    struct tcp_sock *tp = tcp_sk(sk);
-    struct dst_entry *dst = __sk_dst_get(sk);
-
-    if (dst == NULL)
-        goto reset;
-
-    dst_confirm(dst);
-
-    if (dst_metric_locked(dst, RTAX_CWND))
-        tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
-    if (dst_metric(dst, RTAX_SSTHRESH)) {
-        tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
-        if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
-            tp->snd_ssthresh = tp->snd_cwnd_clamp;
-    } else {
-        /* ssthresh may have been reduced unnecessarily during.
-         * 3WHS. Restore it back to its initial default.
-         */
-        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-    }
-    if (dst_metric(dst, RTAX_REORDERING) &&
-        tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
-        tcp_disable_fack(tp);
-        tcp_disable_early_retrans(tp);
-        tp->reordering = dst_metric(dst, RTAX_REORDERING);
-    }
-
-    if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
-        goto reset;
-
-    /* Initial rtt is determined from SYN,SYN-ACK.
-     * The segment is small and rtt may appear much
-     * less than real one. Use per-dst memory
-     * to make it more realistic.
-     *
-     * A bit of theory. RTT is time passed after "normal" sized packet
-     * is sent until it is ACKed. In normal circumstances sending small
-     * packets force peer to delay ACKs and calculation is correct too.
-     * The algorithm is adaptive and, provided we follow specs, it
-     * NEVER underestimate RTT. BUT! If peer tries to make some clever
-     * tricks sort of "quick acks" for time long enough to decrease RTT
-     * to low value, and then abruptly stops to do it and starts to delay
-     * ACKs, wait for troubles.
-     */
-    if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
-        tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
-        tp->rtt_seq = tp->snd_nxt;
-    }
-    if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
-        tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
-        tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
-    }
-    tcp_set_rto(sk);
-reset:
-    if (tp->srtt == 0) {
-        /* RFC6298: 5.7 We've failed to get a valid RTT sample from
-         * 3WHS. This is most likely due to retransmission,
-         * including spurious one. Reset the RTO back to 3secs
-         * from the more aggressive 1sec to avoid more spurious
-         * retransmission.
-         */
-        tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
-        inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
-    }
-    /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
-     * retransmitted. In light of RFC6298 more aggressive 1sec
-     * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
-     * retransmission has occurred.
-     */
-    if (tp->total_retrans > 1)
-        tp->snd_cwnd = 1;
-    else
-        tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-    tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
 static void tcp_update_reordering(struct sock *sk, const int metric,
                   const int ts)
 {
@@ -2702,7 +2521,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
 /* Nothing was retransmitted or returned timestamp is less
  * than timestamp of the first retransmission.
  */
-static inline int tcp_packet_delayed(const struct tcp_sock *tp)
+static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 {
     return !tp->retrans_stamp ||
         (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2763,7 +2582,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
     tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static inline int tcp_may_undo(const struct tcp_sock *tp)
+static inline bool tcp_may_undo(const struct tcp_sock *tp)
 {
     return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
 }
@@ -3552,13 +3371,13 @@ static void tcp_ack_probe(struct sock *sk)
     }
 }
 
-static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
+static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
 {
     return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
         inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
 }
 
-static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
+static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 {
     const struct tcp_sock *tp = tcp_sk(sk);
     return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
@@ -3568,7 +3387,7 @@ static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 /* Check that window update is acceptable.
  * The function assumes that snd_una<=ack<=snd_next.
  */
-static inline int tcp_may_update_window(const struct tcp_sock *tp,
+static inline bool tcp_may_update_window(const struct tcp_sock *tp,
                     const u32 ack, const u32 ack_seq,
                     const u32 nwin)
 {
@@ -3869,9 +3688,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         tcp_cong_avoid(sk, ack, prior_in_flight);
     }
 
-    if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
-        dst_confirm(__sk_dst_get(sk));
-
+    if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
+        struct dst_entry *dst = __sk_dst_get(sk);
+        if (dst)
+            dst_confirm(dst);
+    }
     return 1;
 
 no_queue:
@@ -3911,7 +3732,8 @@ old_ack:
  * the fast version below fails.
  */
 void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
-               const u8 **hvpp, int estab)
+               const u8 **hvpp, int estab,
+               struct tcp_fastopen_cookie *foc)
 {
     const unsigned char *ptr;
     const struct tcphdr *th = tcp_hdr(skb);
@@ -4018,8 +3840,25 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
             break;
             }
             break;
-        }
 
+        case TCPOPT_EXP:
+            /* Fast Open option shares code 254 using a
+             * 16 bits magic number. It's valid only in
+             * SYN or SYN-ACK with an even size.
+             */
+            if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
+                get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
+                foc == NULL || !th->syn || (opsize & 1))
+                break;
+            foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
+            if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
+                foc->len <= TCP_FASTOPEN_COOKIE_MAX)
+                memcpy(foc->val, ptr + 2, foc->len);
+            else if (foc->len != 0)
+                foc->len = -1;
+            break;
+
+        }
         ptr += opsize-2;
         length -= opsize;
     }
@@ -4061,7 +3900,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
         if (tcp_parse_aligned_timestamp(tp, th))
             return true;
     }
-    tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
+    tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
     return true;
 }
 
@@ -4167,7 +4006,7 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
         (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
 }
 
-static inline int tcp_paws_discard(const struct sock *sk,
+static inline bool tcp_paws_discard(const struct sock *sk,
                    const struct sk_buff *skb)
 {
     const struct tcp_sock *tp = tcp_sk(sk);
@@ -4189,7 +4028,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
  * (borrowed from freebsd)
  */
 
-static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
+static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
     return !before(end_seq, tp->rcv_wup) &&
         !after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4579,8 +4418,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
     TCP_ECN_check_ce(tp, skb);
 
-    if (tcp_try_rmem_schedule(sk, skb->truesize)) {
-        /* TODO: should increment a counter */
+    if (unlikely(tcp_try_rmem_schedule(sk, skb->truesize))) {
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
         __kfree_skb(skb);
         return;
     }
@@ -4589,6 +4428,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
     tp->pred_flags = 0;
     inet_csk_schedule_ack(sk);
 
+    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
     SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
            tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -4642,6 +4482,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
     if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
         if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
             /* All the bits are present. Drop. */
+            NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
             __kfree_skb(skb);
             skb = NULL;
             tcp_dsack_set(sk, seq, end_seq);
@@ -4680,6 +4521,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
         __skb_unlink(skb1, &tp->out_of_order_queue);
         tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                  TCP_SKB_CB(skb1)->end_seq);
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
         __kfree_skb(skb1);
     }
 
@@ -5372,7 +5214,7 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk,
     return result;
 }
 
-static inline int tcp_checksum_complete_user(struct sock *sk,
+static inline bool tcp_checksum_complete_user(struct sock *sk,
                          struct sk_buff *skb)
 {
     return !skb_csum_unnecessary(skb) &&
@@ -5426,11 +5268,28 @@ out:
 }
 #endif /* CONFIG_NET_DMA */
 
+static void tcp_send_challenge_ack(struct sock *sk)
+{
+    /* unprotected vars, we dont care of overwrites */
+    static u32 challenge_timestamp;
+    static unsigned int challenge_count;
+    u32 now = jiffies / HZ;
+
+    if (now != challenge_timestamp) {
+        challenge_timestamp = now;
+        challenge_count = 0;
+    }
+    if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+        tcp_send_ack(sk);
+    }
+}
+
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
-static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                   const struct tcphdr *th, int syn_inerr)
 {
     const u8 *hash_location;
     struct tcp_sock *tp = tcp_sk(sk);
@@ -5455,14 +5314,26 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
      * an acknowledgment should be sent in reply (unless the RST
      * bit is set, if so drop the segment and return)".
      */
-        if (!th->rst)
+        if (!th->rst) {
+            if (th->syn)
+                goto syn_challenge;
             tcp_send_dupack(sk, skb);
+        }
         goto discard;
     }
 
     /* Step 2: check RST bit */
     if (th->rst) {
-        tcp_reset(sk);
+        /* RFC 5961 3.2 :
+         * If sequence number exactly matches RCV.NXT, then
+         *     RESET the connection
+         * else
+         *     Send a challenge ACK
+         */
+        if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
+            tcp_reset(sk);
+        else
+            tcp_send_challenge_ack(sk);
         goto discard;
     }
 
@@ -5473,20 +5344,23 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 
     /* step 3: check security and precedence [ignored] */
 
-    /* step 4: Check for a SYN in window. */
-    if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+    /* step 4: Check for a SYN
+     * RFC 5691 4.2 : Send a challenge ack
+     */
+    if (th->syn) {
+syn_challenge:
         if (syn_inerr)
             TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
-        tcp_reset(sk);
-        return -1;
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+        tcp_send_challenge_ack(sk);
+        goto discard;
     }
 
-    return 1;
+    return true;
 
 discard:
     __kfree_skb(skb);
-    return 0;
+    return false;
 }
 
 /*
@@ -5516,7 +5390,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
              const struct tcphdr *th, unsigned int len)
 {
     struct tcp_sock *tp = tcp_sk(sk);
-    int res;
 
     /*
      *  Header prediction.
@@ -5693,9 +5566,8 @@ slow_path:
      *  Standard slow path.
      */
 
-    res = tcp_validate_incoming(sk, skb, th, 1);
-    if (res <= 0)
-        return -res;
+    if (!tcp_validate_incoming(sk, skb, th, 1))
+        return 0;
 
 step5:
     if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
@@ -5729,8 +5601,10 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
     tcp_set_state(sk, TCP_ESTABLISHED);
 
-    if (skb != NULL)
+    if (skb != NULL) {
+        sk->sk_rx_dst = dst_clone(skb_dst(skb));
         security_inet_conn_established(sk, skb);
+    }
 
     /* Make sure socket is routed, for correct metrics. */
     icsk->icsk_af_ops->rebuild_header(sk);
@@ -5760,6 +5634,45 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
     }
 }
 
+static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+                    struct tcp_fastopen_cookie *cookie)
+{
+    struct tcp_sock *tp = tcp_sk(sk);
+    struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL;
+    u16 mss = tp->rx_opt.mss_clamp;
+    bool syn_drop;
+
+    if (mss == tp->rx_opt.user_mss) {
+        struct tcp_options_received opt;
+        const u8 *hash_location;
+
+        /* Get original SYNACK MSS value if user MSS sets mss_clamp */
+        tcp_clear_options(&opt);
+        opt.user_mss = opt.mss_clamp = 0;
+        tcp_parse_options(synack, &opt, &hash_location, 0, NULL);
+        mss = opt.mss_clamp;
+    }
+
+    if (!tp->syn_fastopen)  /* Ignore an unsolicited cookie */
+        cookie->len = -1;
+
+    /* The SYN-ACK neither has cookie nor acknowledges the data. Presumably
+     * the remote receives only the retransmitted (regular) SYNs: either
+     * the original SYN-data or the corresponding SYN-ACK is lost.
+     */
+    syn_drop = (cookie->len <= 0 && data &&
+            inet_csk(sk)->icsk_retransmits);
+
+    tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
+
+    if (data) { /* Retransmit unacked data in SYN */
+        tcp_retransmit_skb(sk, data);
+        tcp_rearm_rto(sk);
+        return true;
+    }
+    return false;
+}
+
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                      const struct tcphdr *th, unsigned int len)
 {
@@ -5767,9 +5680,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
     struct inet_connection_sock *icsk = inet_csk(sk);
     struct tcp_sock *tp = tcp_sk(sk);
     struct tcp_cookie_values *cvp = tp->cookie_values;
+    struct tcp_fastopen_cookie foc = { .len = -1 };
     int saved_clamp = tp->rx_opt.mss_clamp;
 
-    tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
+    tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
 
     if (th->ack) {
         /* rfc793:
@@ -5779,11 +5693,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
          * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
          * a reset (unless the RST bit is set, if so drop
          * the segment and return)"
-         *
-         * We do not send data with SYN, so that RFC-correct
-         * test reduces to:
          */
-        if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
+        if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
+            after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
             goto reset_and_undo;
 
         if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -5895,6 +5807,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
     tcp_finish_connect(sk, skb);
 
+    if ((tp->syn_fastopen || tp->syn_data) &&
+        tcp_rcv_fastopen_synack(sk, skb, &foc))
+        return -1;
+
     if (sk->sk_write_pending ||
         icsk->icsk_accept_queue.rskq_defer_accept ||
         icsk->icsk_ack.pingpong) {
@@ -6013,7 +5929,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
     struct tcp_sock *tp = tcp_sk(sk);
     struct inet_connection_sock *icsk = inet_csk(sk);
     int queued = 0;
-    int res;
 
     tp->rx_opt.saw_tstamp = 0;
 
@@ -6068,9 +5983,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
         return 0;
     }
 
-    res = tcp_validate_incoming(sk, skb, th, 0);
-    if (res <= 0)
-        return -res;
+    if (!tcp_validate_incoming(sk, skb, th, 0))
+        return 0;
 
     /* step 5: check the ACK field */
     if (th->ack) {
@@ -6126,9 +6040,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
     case TCP_FIN_WAIT1:
         if (tp->snd_una == tp->write_seq) {
+            struct dst_entry *dst;
+
             tcp_set_state(sk, TCP_FIN_WAIT2);
             sk->sk_shutdown |= SEND_SHUTDOWN;
-            dst_confirm(__sk_dst_get(sk));
+
+            dst = __sk_dst_get(sk);
+            if (dst)
+                dst_confirm(dst);
 
             if (!sock_flag(sk, SOCK_DEAD))
                 /* Wake up lingering close() */