path: root/net/ipv4/tcp_input.c
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9944c1d9a218..257b61789eeb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
 EXPORT_SYMBOL(sysctl_tcp_ecn);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
-int sysctl_tcp_adv_win_scale __read_mostly = 2;
+int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 int sysctl_tcp_stdurg __read_mostly;
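
The default change halves how much of the receive buffer is advertised as window: sysctl_tcp_adv_win_scale feeds the kernel's tcp_win_from_space() helper, which reserves space >> scale for per-packet overhead. A minimal standalone sketch of that arithmetic (the helper is re-declared locally and the buffer size in main() is illustrative, not part of this patch):

#include <stdio.h>

/* Sketch only: how tcp_adv_win_scale splits receive-buffer space between
 * the advertised window and per-packet overhead.  Positive values reserve
 * space >> scale for overhead; zero or negative values advertise only
 * space >> -scale. */
static int win_from_space(int space, int adv_win_scale)
{
	return adv_win_scale <= 0 ?
	       (space >> (-adv_win_scale)) :
	       space - (space >> adv_win_scale);
}

int main(void)
{
	int space = 87380;	/* illustrative buffer size */

	printf("scale 2 -> window %d\n", win_from_space(space, 2));	/* 3/4 of space */
	printf("scale 1 -> window %d\n", win_from_space(space, 1));	/* 1/2 of space */
	return 0;
}

With the old default of 2 this example advertises 65535 of the 87380 bytes; with the new default of 1 it advertises 43690, leaving the other half of the buffer for skb truesize overhead.
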
@@ -335,6 +335,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 			incr = __tcp_grow_window(sk, skb);
 
 		if (incr) {
+			incr = max_t(int, incr, 2 * skb->len);
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
 					       tp->window_clamp);
 			inet_csk(sk)->icsk_ack.quick |= 1;
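
The added max_t() keeps the rcv_ssthresh increment from being dwarfed by large aggregated (GRO/LRO) skbs: the receive window threshold now grows by at least twice the bytes just received. Toy arithmetic with invented numbers; max_t here is a simplified stand-in for the kernel macro and the incr value is hypothetical:

#include <stdio.h>

#define max_t(type, a, b)	((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int rcv_ssthresh = 65535;
	unsigned int window_clamp = 4U * 1024 * 1024;
	unsigned int skb_len = 45000;		/* one large aggregated skb */
	int incr = 2896;			/* pretend __tcp_grow_window() returned ~2*advmss */

	incr = max_t(int, incr, 2 * skb_len);	/* the line this hunk adds */
	rcv_ssthresh = rcv_ssthresh + incr < window_clamp ?
		       rcv_ssthresh + incr : window_clamp;

	printf("incr=%d rcv_ssthresh=%u\n", incr, rcv_ssthresh);
	return 0;
}
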
@@ -494,7 +495,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 		goto new_measure;
 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 		return;
-	tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
+	tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
 
 new_measure:
 	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
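
rcv_rtt_est.time is stored from the 32-bit tcp_time_stamp clock, so the sample has to be taken by subtracting on that same clock; on a 64-bit build the raw jiffies counter is wider and yields a bogus delta once the low 32 bits have wrapped. A small standalone illustration (values are invented, and tcp_time_stamp is modelled here as the 32-bit truncation of jiffies):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long jiffies = 0x100000100UL;	/* 64-bit jiffies, past a 32-bit wrap */
	uint32_t start = 0xf0;			/* rcv_rtt_est.time, stored earlier as (u32)jiffies */
	uint32_t tcp_time_stamp = (uint32_t)jiffies;

	/* Mixing widths: the difference is wildly wrong. */
	printf("jiffies - start        = %lu (nonsense)\n", jiffies - start);
	/* Same 32-bit clock on both sides: wraps to the real delta. */
	printf("tcp_time_stamp - start = %u (ticks)\n", tcp_time_stamp - start);
	return 0;
}
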
@@ -2867,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
 
 	/* Do not moderate cwnd if it's already undone in cwr or recovery. */
 	if (tp->undo_marker) {
-		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
 			tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-		else /* PRR */
+			tp->snd_cwnd_stamp = tcp_time_stamp;
+		} else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
+			/* PRR algorithm. */
 			tp->snd_cwnd = tp->snd_ssthresh;
-		tp->snd_cwnd_stamp = tcp_time_stamp;
+			tp->snd_cwnd_stamp = tcp_time_stamp;
+		}
 	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
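
After this hunk, the cwnd written at CWR/recovery completion splits into three cases: CWR moderates to min(cwnd, ssthresh), PRR finishes the reduction at ssthresh, and an untouched ssthresh (still the TCP_INFINITE_SSTHRESH sentinel) leaves cwnd alone instead of clamping it to the sentinel. A rough sketch of that decision as a pure function, not kernel code; TCP_CA_CWR and TCP_INFINITE_SSTHRESH are redefined locally and the values in main() are illustrative:

#include <stdio.h>

#define TCP_INFINITE_SSTHRESH	0x7fffffff	/* same sentinel value the kernel uses */
#define TCP_CA_CWR		2		/* local stand-in for the enum value */

static unsigned int complete_cwr_cwnd(unsigned int snd_cwnd,
				      unsigned int snd_ssthresh,
				      int undo_marker, int ca_state)
{
	if (!undo_marker)
		return snd_cwnd;		/* already undone: leave cwnd alone */
	if (ca_state == TCP_CA_CWR)
		return snd_cwnd < snd_ssthresh ? snd_cwnd : snd_ssthresh;
	if (snd_ssthresh < TCP_INFINITE_SSTHRESH)
		return snd_ssthresh;		/* PRR: finish the reduction at ssthresh */
	return snd_cwnd;			/* ssthresh never lowered: do not clamp to the sentinel */
}

int main(void)
{
	printf("CWR case:           %u\n", complete_cwr_cwnd(20, 10, 1, TCP_CA_CWR));
	printf("PRR case:           %u\n", complete_cwr_cwnd(20, 10, 1, 0));
	printf("untouched ssthresh: %u\n",
	       complete_cwr_cwnd(20, TCP_INFINITE_SSTHRESH, 1, 0));
	return 0;
}
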