aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--  include/net/tcp.h      |  1 -
-rw-r--r--  net/ipv4/tcp_input.c   | 22 ----------------------
-rw-r--r--  net/ipv4/tcp_output.c  | 22 ++++++++++++++++++++++
3 files changed, 22 insertions(+), 23 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 87d877408188..163d2b467d78 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -558,7 +558,6 @@ void tcp_send_loss_probe(struct sock *sk);
 bool tcp_schedule_loss_probe(struct sock *sk);
 
 /* tcp_input.c */
-void tcp_cwnd_application_limited(struct sock *sk);
 void tcp_resume_early_retransmit(struct sock *sk);
 void tcp_rearm_rto(struct sock *sk);
 void tcp_reset(struct sock *sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d6b46eb2f94c..6efed134ab63 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4703,28 +4703,6 @@ static int tcp_prune_queue(struct sock *sk)
 	return -1;
 }
 
-/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
- * As additional protections, we do not touch cwnd in retransmission phases,
- * and if application hit its sndbuf limit recently.
- */
-void tcp_cwnd_application_limited(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
-	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		/* Limited by application or receiver window. */
-		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
-		u32 win_used = max(tp->snd_cwnd_used, init_win);
-		if (win_used < tp->snd_cwnd) {
-			tp->snd_ssthresh = tcp_current_ssthresh(sk);
-			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
-		}
-		tp->snd_cwnd_used = 0;
-	}
-	tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
 static bool tcp_should_expand_sndbuf(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 025e25093984..29dde97c3c41 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1387,6 +1387,28 @@ unsigned int tcp_current_mss(struct sock *sk)
 	return mss_now;
 }
 
+/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
+ * As additional protections, we do not touch cwnd in retransmission phases,
+ * and if application hit its sndbuf limit recently.
+ */
+static void tcp_cwnd_application_limited(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
+	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
+		/* Limited by application or receiver window. */
+		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
+		u32 win_used = max(tp->snd_cwnd_used, init_win);
+		if (win_used < tp->snd_cwnd) {
+			tp->snd_ssthresh = tcp_current_ssthresh(sk);
+			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
+		}
+		tp->snd_cwnd_used = 0;
+	}
+	tp->snd_cwnd_stamp = tcp_time_stamp;
+}
+
 /* Congestion window validation. (RFC2861) */
 static void tcp_cwnd_validate(struct sock *sk)
 {