about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- net/ipv4/tcp_input.c | 47
1 file changed, 23 insertions(+), 24 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a13692560e63..a759e19496d2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -81,8 +81,6 @@ int sysctl_tcp_sack __read_mostly = 1;
81int sysctl_tcp_fack __read_mostly = 1; 81int sysctl_tcp_fack __read_mostly = 1;
82int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; 82int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
83EXPORT_SYMBOL(sysctl_tcp_reordering); 83EXPORT_SYMBOL(sysctl_tcp_reordering);
84int sysctl_tcp_ecn __read_mostly = 2;
85EXPORT_SYMBOL(sysctl_tcp_ecn);
86int sysctl_tcp_dsack __read_mostly = 1; 84int sysctl_tcp_dsack __read_mostly = 1;
87int sysctl_tcp_app_win __read_mostly = 31; 85int sysctl_tcp_app_win __read_mostly = 31;
88int sysctl_tcp_adv_win_scale __read_mostly = 1; 86int sysctl_tcp_adv_win_scale __read_mostly = 1;
@@ -100,7 +98,6 @@ int sysctl_tcp_frto_response __read_mostly;
100int sysctl_tcp_thin_dupack __read_mostly; 98int sysctl_tcp_thin_dupack __read_mostly;
101 99
102int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
103int sysctl_tcp_abc __read_mostly;
104int sysctl_tcp_early_retrans __read_mostly = 2; 101int sysctl_tcp_early_retrans __read_mostly = 2;
105 102
106#define FLAG_DATA 0x01 /* Incoming frame contained data. */ 103#define FLAG_DATA 0x01 /* Incoming frame contained data. */
@@ -2009,7 +2006,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
2009 tp->snd_cwnd_cnt = 0; 2006 tp->snd_cwnd_cnt = 0;
2010 tp->snd_cwnd_stamp = tcp_time_stamp; 2007 tp->snd_cwnd_stamp = tcp_time_stamp;
2011 tp->frto_counter = 0; 2008 tp->frto_counter = 0;
2012 tp->bytes_acked = 0;
2013 2009
2014 tp->reordering = min_t(unsigned int, tp->reordering, 2010 tp->reordering = min_t(unsigned int, tp->reordering,
2015 sysctl_tcp_reordering); 2011 sysctl_tcp_reordering);
@@ -2058,7 +2054,6 @@ void tcp_enter_loss(struct sock *sk, int how)
2058 tp->snd_cwnd_cnt = 0; 2054 tp->snd_cwnd_cnt = 0;
2059 tp->snd_cwnd_stamp = tcp_time_stamp; 2055 tp->snd_cwnd_stamp = tcp_time_stamp;
2060 2056
2061 tp->bytes_acked = 0;
2062 tcp_clear_retrans_partial(tp); 2057 tcp_clear_retrans_partial(tp);
2063 2058
2064 if (tcp_is_reno(tp)) 2059 if (tcp_is_reno(tp))
@@ -2686,7 +2681,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
2686 struct tcp_sock *tp = tcp_sk(sk); 2681 struct tcp_sock *tp = tcp_sk(sk);
2687 2682
2688 tp->high_seq = tp->snd_nxt; 2683 tp->high_seq = tp->snd_nxt;
2689 tp->bytes_acked = 0;
2690 tp->snd_cwnd_cnt = 0; 2684 tp->snd_cwnd_cnt = 0;
2691 tp->prior_cwnd = tp->snd_cwnd; 2685 tp->prior_cwnd = tp->snd_cwnd;
2692 tp->prr_delivered = 0; 2686 tp->prr_delivered = 0;
@@ -2737,7 +2731,6 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
2737 struct tcp_sock *tp = tcp_sk(sk); 2731 struct tcp_sock *tp = tcp_sk(sk);
2738 2732
2739 tp->prior_ssthresh = 0; 2733 tp->prior_ssthresh = 0;
2740 tp->bytes_acked = 0;
2741 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2734 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2742 tp->undo_marker = 0; 2735 tp->undo_marker = 0;
2743 tcp_init_cwnd_reduction(sk, set_ssthresh); 2736 tcp_init_cwnd_reduction(sk, set_ssthresh);
@@ -3419,7 +3412,6 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
3419{ 3412{
3420 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 3413 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
3421 tp->snd_cwnd_cnt = 0; 3414 tp->snd_cwnd_cnt = 0;
3422 tp->bytes_acked = 0;
3423 TCP_ECN_queue_cwr(tp); 3415 TCP_ECN_queue_cwr(tp);
3424 tcp_moderate_cwnd(tp); 3416 tcp_moderate_cwnd(tp);
3425} 3417}
@@ -3504,6 +3496,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
3504 } 3496 }
3505 } else { 3497 } else {
3506 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3498 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
3499 if (!tcp_packets_in_flight(tp)) {
3500 tcp_enter_frto_loss(sk, 2, flag);
3501 return true;
3502 }
3503
3507 /* Prevent sending of new data. */ 3504 /* Prevent sending of new data. */
3508 tp->snd_cwnd = min(tp->snd_cwnd, 3505 tp->snd_cwnd = min(tp->snd_cwnd,
3509 tcp_packets_in_flight(tp)); 3506 tcp_packets_in_flight(tp));
@@ -3610,15 +3607,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3610 if (after(ack, prior_snd_una)) 3607 if (after(ack, prior_snd_una))
3611 flag |= FLAG_SND_UNA_ADVANCED; 3608 flag |= FLAG_SND_UNA_ADVANCED;
3612 3609
3613 if (sysctl_tcp_abc) {
3614 if (icsk->icsk_ca_state < TCP_CA_CWR)
3615 tp->bytes_acked += ack - prior_snd_una;
3616 else if (icsk->icsk_ca_state == TCP_CA_Loss)
3617 /* we assume just one segment left network */
3618 tp->bytes_acked += min(ack - prior_snd_una,
3619 tp->mss_cache);
3620 }
3621
3622 prior_fackets = tp->fackets_out; 3610 prior_fackets = tp->fackets_out;
3623 prior_in_flight = tcp_packets_in_flight(tp); 3611 prior_in_flight = tcp_packets_in_flight(tp);
3624 3612
@@ -3872,7 +3860,7 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
3872 ++ptr; 3860 ++ptr;
3873 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3861 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3874 ++ptr; 3862 ++ptr;
3875 tp->rx_opt.rcv_tsecr = ntohl(*ptr); 3863 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
3876 return true; 3864 return true;
3877 } 3865 }
3878 return false; 3866 return false;
@@ -3896,7 +3884,11 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
3896 if (tcp_parse_aligned_timestamp(tp, th)) 3884 if (tcp_parse_aligned_timestamp(tp, th))
3897 return true; 3885 return true;
3898 } 3886 }
3887
3899 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL); 3888 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
3889 if (tp->rx_opt.saw_tstamp)
3890 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
3891
3900 return true; 3892 return true;
3901} 3893}
3902 3894
@@ -5543,6 +5535,9 @@ slow_path:
5543 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) 5535 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
5544 goto csum_error; 5536 goto csum_error;
5545 5537
5538 if (!th->ack && !th->rst)
5539 goto discard;
5540
5546 /* 5541 /*
5547 * Standard slow path. 5542 * Standard slow path.
5548 */ 5543 */
@@ -5551,7 +5546,7 @@ slow_path:
5551 return 0; 5546 return 0;
5552 5547
5553step5: 5548step5:
5554 if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) 5549 if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
5555 goto discard; 5550 goto discard;
5556 5551
5557 /* ts_recent update must be made after we are sure that the packet 5552 /* ts_recent update must be made after we are sure that the packet
@@ -5646,8 +5641,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5646 * the remote receives only the retransmitted (regular) SYNs: either 5641 * the remote receives only the retransmitted (regular) SYNs: either
5647 * the original SYN-data or the corresponding SYN-ACK is lost. 5642 * the original SYN-data or the corresponding SYN-ACK is lost.
5648 */ 5643 */
5649 syn_drop = (cookie->len <= 0 && data && 5644 syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
5650 inet_csk(sk)->icsk_retransmits);
5651 5645
5652 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); 5646 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
5653 5647
@@ -5675,6 +5669,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5675 int saved_clamp = tp->rx_opt.mss_clamp; 5669 int saved_clamp = tp->rx_opt.mss_clamp;
5676 5670
5677 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc); 5671 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
5672 if (tp->rx_opt.saw_tstamp)
5673 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
5678 5674
5679 if (th->ack) { 5675 if (th->ack) {
5680 /* rfc793: 5676 /* rfc793:
@@ -5984,11 +5980,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5984 if (tcp_check_req(sk, skb, req, NULL, true) == NULL) 5980 if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
5985 goto discard; 5981 goto discard;
5986 } 5982 }
5983
5984 if (!th->ack && !th->rst)
5985 goto discard;
5986
5987 if (!tcp_validate_incoming(sk, skb, th, 0)) 5987 if (!tcp_validate_incoming(sk, skb, th, 0))
5988 return 0; 5988 return 0;
5989 5989
5990 /* step 5: check the ACK field */ 5990 /* step 5: check the ACK field */
5991 if (th->ack) { 5991 if (true) {
5992 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; 5992 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
5993 5993
5994 switch (sk->sk_state) { 5994 switch (sk->sk_state) {
@@ -6138,8 +6138,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6138 } 6138 }
6139 break; 6139 break;
6140 } 6140 }
6141 } else 6141 }
6142 goto discard;
6143 6142
6144 /* ts_recent update must be made after we are sure that the packet 6143 /* ts_recent update must be made after we are sure that the packet
6145 * is in window. 6144 * is in window.