Diffstat (limited to 'net/ipv4/tcp_input.c')
 net/ipv4/tcp_input.c | 38 ++++++++++++--------------------------
 1 file changed, 12 insertions(+), 26 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ad70a962c20e..3bd55bad230a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -81,8 +81,6 @@ int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly = 1;
 int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
 EXPORT_SYMBOL(sysctl_tcp_reordering);
-int sysctl_tcp_ecn __read_mostly = 2;
-EXPORT_SYMBOL(sysctl_tcp_ecn);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 1;
@@ -100,7 +98,6 @@ int sysctl_tcp_frto_response __read_mostly;
 int sysctl_tcp_thin_dupack __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
-int sysctl_tcp_abc __read_mostly;
 int sysctl_tcp_early_retrans __read_mostly = 2;
 
 #define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
@@ -2009,7 +2006,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 	tp->frto_counter = 0;
-	tp->bytes_acked = 0;
 
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
@@ -2058,17 +2054,13 @@ void tcp_enter_loss(struct sock *sk, int how)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 
-	tp->bytes_acked = 0;
 	tcp_clear_retrans_partial(tp);
 
 	if (tcp_is_reno(tp))
 		tcp_reset_reno_sack(tp);
 
-	if (!how) {
-		/* Push undo marker, if it was plain RTO and nothing
-		 * was retransmitted. */
-		tp->undo_marker = tp->snd_una;
-	} else {
+	tp->undo_marker = tp->snd_una;
+	if (how) {
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
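The tcp_enter_loss() hunk is a behavioral simplification, not just cleanup: the undo marker is now pushed on every entry into loss state, and only the SACK/FACK counters stay conditional on `how`. A minimal standalone sketch of that control flow (illustrative struct and field names, not the kernel's tcp_sock):

/* Standalone sketch of the simplified tcp_enter_loss() control flow.
 * The struct and values are illustrative, not the kernel's tcp_sock. */
#include <stdio.h>

struct tp_sketch {
	unsigned int snd_una, undo_marker, sacked_out, fackets_out;
};

static void enter_loss_sketch(struct tp_sketch *tp, int how)
{
	tp->undo_marker = tp->snd_una;	/* undo point is now always recorded */
	if (how) {			/* full reset also clears SACK state */
		tp->sacked_out = 0;
		tp->fackets_out = 0;
	}
}

int main(void)
{
	struct tp_sketch tp = { .snd_una = 1000, .undo_marker = 0,
				.sacked_out = 3, .fackets_out = 5 };

	enter_loss_sketch(&tp, 0);	/* plain RTO: SACK counters survive */
	printf("undo=%u sacked=%u\n", tp.undo_marker, tp.sacked_out);
	return 0;
}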
@@ -2686,7 +2678,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tp->high_seq = tp->snd_nxt;
-	tp->bytes_acked = 0;
 	tp->snd_cwnd_cnt = 0;
 	tp->prior_cwnd = tp->snd_cwnd;
 	tp->prr_delivered = 0;
@@ -2737,7 +2728,6 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tp->prior_ssthresh = 0;
-	tp->bytes_acked = 0;
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		tp->undo_marker = 0;
 		tcp_init_cwnd_reduction(sk, set_ssthresh);
@@ -3419,7 +3409,6 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
 {
 	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 	tp->snd_cwnd_cnt = 0;
-	tp->bytes_acked = 0;
 	TCP_ECN_queue_cwr(tp);
 	tcp_moderate_cwnd(tp);
 }
@@ -3615,15 +3604,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (after(ack, prior_snd_una))
 		flag |= FLAG_SND_UNA_ADVANCED;
 
-	if (sysctl_tcp_abc) {
-		if (icsk->icsk_ca_state < TCP_CA_CWR)
-			tp->bytes_acked += ack - prior_snd_una;
-		else if (icsk->icsk_ca_state == TCP_CA_Loss)
-			/* we assume just one segment left network */
-			tp->bytes_acked += min(ack - prior_snd_una,
-					       tp->mss_cache);
-	}
-
 	prior_fackets = tp->fackets_out;
 	prior_in_flight = tcp_packets_in_flight(tp);
 
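All of the tp->bytes_acked removals above, and the block deleted here from tcp_ack(), are one change: Appropriate Byte Counting (RFC 3465, sysctl_tcp_abc) is being retired, so the per-ACK byte accumulator and its resets go with it. For reference, a simplified sketch of what the removed accounting did (illustrative names, not the kernel API):

/* Sketch of the retired ABC accounting (RFC 3465), illustrative names.
 * Below CWR every newly acked byte was counted; in Loss at most one
 * MSS per ACK was credited, assuming one segment had left the network. */
#include <stdint.h>

enum ca_sketch { CA_OPEN, CA_DISORDER, CA_CWR, CA_RECOVERY, CA_LOSS };

struct abc_sketch {
	uint32_t bytes_acked;
	uint32_t mss_cache;
};

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

static void abc_account(struct abc_sketch *tp, enum ca_sketch state,
			uint32_t ack, uint32_t prior_snd_una)
{
	if (state < CA_CWR)
		tp->bytes_acked += ack - prior_snd_una;
	else if (state == CA_LOSS)
		tp->bytes_acked += min_u32(ack - prior_snd_una,
					   tp->mss_cache);
}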
@@ -3877,7 +3857,7 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
 		++ptr;
 		tp->rx_opt.rcv_tsval = ntohl(*ptr);
 		++ptr;
-		tp->rx_opt.rcv_tsecr = ntohl(*ptr);
+		tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
 		return true;
 	}
 	return false;
@@ -3901,7 +3881,11 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
 		if (tcp_parse_aligned_timestamp(tp, th))
 			return true;
 	}
+
 	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
+	if (tp->rx_opt.saw_tstamp)
+		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
+
 	return true;
 }
 
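The three timestamp hunks (here, in tcp_parse_aligned_timestamp() above, and in tcp_rcv_synsent_state_process() below) share one invariant: tp->tsoffset is a per-connection offset added to outgoing TSval, so every echoed TSecr must have it subtracted before any RTT arithmetic sees the value. A minimal sketch of that symmetry (hypothetical helper names, not kernel functions):

/* Sketch of the tsoffset symmetry; uint32_t wraparound arithmetic
 * makes add-then-subtract exact even across overflow. */
#include <stdint.h>

struct ts_conn_sketch {
	uint32_t tsoffset;	/* per-connection timestamp offset */
};

static uint32_t tsval_to_send(const struct ts_conn_sketch *c, uint32_t clock)
{
	return clock + c->tsoffset;	/* what the peer will echo as TSecr */
}

static uint32_t tsecr_received(const struct ts_conn_sketch *c, uint32_t echoed)
{
	return echoed - c->tsoffset;	/* back in the local clock domain */
}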
@@ -5498,6 +5482,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 		if (tcp_checksum_complete_user(sk, skb))
 			goto csum_error;
 
+		if ((int)skb->truesize > sk->sk_forward_alloc)
+			goto step5;
+
 		/* Predicted packet is in window by definition.
 		 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
 		 * Hence, check seq<=rcv_wup reduces to:
@@ -5509,9 +5496,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 		tcp_rcv_rtt_measure_ts(sk, skb);
 
-		if ((int)skb->truesize > sk->sk_forward_alloc)
-			goto step5;
-
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 		/* Bulk data transfer: receiver */
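Moving the truesize check ahead of the header-prediction bookkeeping changes the fast path's ordering: when the socket cannot absorb skb->truesize into sk_forward_alloc, the packet now bails to step5 before tcp_rcv_rtt_measure_ts() runs or TCPHPHITS is counted. A schematic of the new ordering with stub helpers (illustrative, not the kernel functions):

/* Schematic of the reordered receive fast path; helpers are stubs. */
#include <stdbool.h>

static bool checksum_ok(void)		{ return true; }
static void measure_rtt_ts(void)	{ /* timestamp RTT sample */ }
static void count_hp_hit(void)		{ /* LINUX_MIB_TCPHPHITS */ }

static int fast_path_sketch(int truesize, int forward_alloc)
{
	if (!checksum_ok())
		return -1;		/* csum_error */
	if (truesize > forward_alloc)
		return 1;		/* step5: slow path, before any
					 * fast-path stats or RTT sampling */
	measure_rtt_ts();
	count_hp_hit();
	return 0;			/* header-prediction hit */
}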
@@ -5682,6 +5666,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 	int saved_clamp = tp->rx_opt.mss_clamp;
 
 	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
+	if (tp->rx_opt.saw_tstamp)
+		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
 	if (th->ack) {
 		/* rfc793: