aboutsummaryrefslogtreecommitdiffstats
path: root/include/net/tcp.h
diff options
context:
space:
mode:
authorYuchung Cheng <ycheng@google.com>2013-10-31 14:07:31 -0400
committerDavid S. Miller <davem@davemloft.net>2013-11-04 19:57:59 -0500
commit9f9843a751d0a2057f9f3d313886e7e5e6ebaac9 (patch)
treea89df5cc0c5f5280b2cfffba7f6933e4db20736f /include/net/tcp.h
parent0d41cca490c274352211efac50e9598d39a9dc80 (diff)
tcp: properly handle stretch acks in slow start
Slow start now increases cwnd by 1 if an ACK acknowledges some packets, regardless of the number of packets. Consequently slow start performance is highly dependent on the degree of the stretch ACKs caused by receiver or network ACK compression mechanisms (e.g., delayed-ACK, GRO, etc). But the slow start algorithm is meant to send twice the amount of packets left, so it should process a stretch ACK of degree N as if it were N ACKs of degree 1, then exit when cwnd exceeds ssthresh. A follow-up patch will use the remainder of the N (if greater than 1) to adjust cwnd in the congestion avoidance phase. In addition this patch retires the experimental limited slow start (LSS) feature. LSS has multiple drawbacks but questionable benefit. The fractional cwnd increase in LSS requires a loop in slow start even though it's rarely used. Configuring such an increase step via a global sysctl on different BDPs seems hard. Finally and most importantly the slow start overshoot concern is now better covered by the Hybrid slow start (hystart) enabled by default. Signed-off-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--include/net/tcp.h7
1 file changed, 3 insertions, 4 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 2d7b4bdc972f..70e55d200610 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -275,7 +275,6 @@ extern int sysctl_tcp_mtu_probing;
275extern int sysctl_tcp_base_mss; 275extern int sysctl_tcp_base_mss;
276extern int sysctl_tcp_workaround_signed_windows; 276extern int sysctl_tcp_workaround_signed_windows;
277extern int sysctl_tcp_slow_start_after_idle; 277extern int sysctl_tcp_slow_start_after_idle;
278extern int sysctl_tcp_max_ssthresh;
279extern int sysctl_tcp_thin_linear_timeouts; 278extern int sysctl_tcp_thin_linear_timeouts;
280extern int sysctl_tcp_thin_dupack; 279extern int sysctl_tcp_thin_dupack;
281extern int sysctl_tcp_early_retrans; 280extern int sysctl_tcp_early_retrans;
@@ -797,7 +796,7 @@ struct tcp_congestion_ops {
797 /* lower bound for congestion window (optional) */ 796 /* lower bound for congestion window (optional) */
798 u32 (*min_cwnd)(const struct sock *sk); 797 u32 (*min_cwnd)(const struct sock *sk);
799 /* do new cwnd calculation (required) */ 798 /* do new cwnd calculation (required) */
800 void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight); 799 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
801 /* call before changing ca_state (optional) */ 800 /* call before changing ca_state (optional) */
802 void (*set_state)(struct sock *sk, u8 new_state); 801 void (*set_state)(struct sock *sk, u8 new_state);
803 /* call when cwnd event occurs (optional) */ 802 /* call when cwnd event occurs (optional) */
@@ -824,12 +823,12 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
824void tcp_get_allowed_congestion_control(char *buf, size_t len); 823void tcp_get_allowed_congestion_control(char *buf, size_t len);
825int tcp_set_allowed_congestion_control(char *allowed); 824int tcp_set_allowed_congestion_control(char *allowed);
826int tcp_set_congestion_control(struct sock *sk, const char *name); 825int tcp_set_congestion_control(struct sock *sk, const char *name);
827void tcp_slow_start(struct tcp_sock *tp); 826int tcp_slow_start(struct tcp_sock *tp, u32 acked);
828void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); 827void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
829 828
830extern struct tcp_congestion_ops tcp_init_congestion_ops; 829extern struct tcp_congestion_ops tcp_init_congestion_ops;
831u32 tcp_reno_ssthresh(struct sock *sk); 830u32 tcp_reno_ssthresh(struct sock *sk);
832void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight); 831void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
833u32 tcp_reno_min_cwnd(const struct sock *sk); 832u32 tcp_reno_min_cwnd(const struct sock *sk);
834extern struct tcp_congestion_ops tcp_reno; 833extern struct tcp_congestion_ops tcp_reno;
835 834