path: root/net/ipv4/tcp_veno.c
author	Yuchung Cheng <ycheng@google.com>	2013-10-31 14:07:31 -0400
committer	David S. Miller <davem@davemloft.net>	2013-11-04 19:57:59 -0500
commit	9f9843a751d0a2057f9f3d313886e7e5e6ebaac9 (patch)
tree	a89df5cc0c5f5280b2cfffba7f6933e4db20736f /net/ipv4/tcp_veno.c
parent	0d41cca490c274352211efac50e9598d39a9dc80 (diff)
tcp: properly handle stretch acks in slow start
Slow start now increases cwnd by 1 if an ACK acknowledges some packets, regardless of the number of packets acknowledged. Consequently, slow start performance is highly dependent on the degree of the stretch ACKs caused by receiver or network ACK compression mechanisms (e.g., delayed ACK, GRO, etc.). But the slow start algorithm is meant to double the number of packets sent per RTT, so it should process a stretch ACK of degree N as if it were N ACKs of degree 1, then exit when cwnd exceeds ssthresh. A follow-up patch will use the remainder of N (if greater than 1) to adjust cwnd in the congestion avoidance phase.

In addition, this patch retires the experimental limited slow start (LSS) feature. LSS has multiple drawbacks but questionable benefit. The fractional cwnd increase in LSS requires a loop in slow start even though it is rarely used. Configuring such an increase step via a global sysctl for different BDPs seems hard. Finally, and most importantly, the slow start overshoot concern is now better covered by Hybrid slow start (hystart), which is enabled by default.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
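To make the per-ACK accounting concrete, here is a minimal, self-contained userspace sketch of slow start growth that credits every segment covered by a stretch ACK. The struct, field names, and slow_start_sketch() helper are illustrative assumptions for this note, not the kernel's own code.

/*
 * Minimal userspace sketch (not the kernel implementation) of slow start
 * growth that credits every segment covered by a stretch ACK.
 */
#include <stdio.h>
#include <stdint.h>

struct cwnd_state {
	uint32_t snd_cwnd;	/* congestion window, in segments */
	uint32_t snd_ssthresh;	/* slow start threshold, in segments */
};

/* Grow cwnd by the number of newly acked segments, capping just past
 * ssthresh so any remainder can be handled by congestion avoidance. */
static void slow_start_sketch(struct cwnd_state *cs, uint32_t acked)
{
	uint32_t cwnd = cs->snd_cwnd + acked;

	if (cwnd > cs->snd_ssthresh)
		cwnd = cs->snd_ssthresh + 1;
	cs->snd_cwnd = cwnd;
}

int main(void)
{
	struct cwnd_state cs = { .snd_cwnd = 10, .snd_ssthresh = 64 };

	/* A stretch ACK covering 4 segments grows cwnd by 4, not by 1. */
	slow_start_sketch(&cs, 4);
	printf("cwnd after stretch ACK of degree 4: %u\n", cs.snd_cwnd);
	return 0;
}

Treating an ACK of degree N as N unit ACKs is what keeps the doubling-per-RTT behaviour intact even under heavy ACK compression.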
Diffstat (limited to 'net/ipv4/tcp_veno.c')
-rw-r--r--	net/ipv4/tcp_veno.c	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index ac43cd747bce..326475a94865 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -114,13 +114,14 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 		tcp_veno_init(sk);
 }
 
-static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct veno *veno = inet_csk_ca(sk);
 
 	if (!veno->doing_veno_now) {
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 		return;
 	}
 
@@ -133,7 +134,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		/* We don't have enough rtt samples to do the Veno
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 	} else {
 		u64 target_cwnd;
 		u32 rtt;
@@ -152,7 +153,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh) {
 		/* Slow start. */
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	} else {
 		/* Congestion avoidance. */
 		if (veno->diff < beta) {