author	Baruch Even <baruch@ev-en.org>	2006-03-21 01:22:47 -0500
committer	David S. Miller <davem@davemloft.net>	2006-03-21 01:22:47 -0500
commit	0bc6d90b82775113bbbe371f5d9fcffefa5fa94d (patch)
tree	5597a249a1ad015c226f2c94192de5efec9092d3
parent	c33ad6e476e4cdc245215f3eb5b3df353df1b370 (diff)
[TCP] H-TCP: Account for delayed-ACKs
Account for delayed ACKs in H-TCP. Delayed ACKs cause H-TCP to be less
aggressive than its design calls for. This is especially true when the
receiver is a Linux machine, where the average delayed ACK covers more
than 3 packets, with values of 7 not unheard of.

Signed-off-by: Baruch Even <baruch@ev-en.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
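For illustration only (not part of the patch): a rough userspace model of the
congestion-avoidance counter, with made-up parameter values, showing how
crediting one count per ACK under-grows cwnd when the receiver acknowledges
several packets per ACK, while crediting every acked packet (the approach this
patch takes) restores the intended growth rate.

/*
 * Standalone sketch, not kernel code.  Models the H-TCP style counter
 * check "(cnt * alpha) >> 7 >= cwnd" with a fixed cwnd, which is good
 * enough to show the effect of delayed ACKs on growth per RTT.
 */
#include <stdio.h>

#define ALPHA_SHIFT 7	/* mirrors the ">>7" scaling used in tcp_htcp.c */

/* Simulate `rtts` round trips and return the average cwnd growth per RTT. */
static double avg_growth(unsigned int cwnd, unsigned int alpha,
			 unsigned int pkts_per_ack, int credit_acked_pkts,
			 unsigned int rtts)
{
	unsigned int cnt = 0, grown = 0;
	unsigned int acks_per_rtt = cwnd / pkts_per_ack;

	for (unsigned int r = 0; r < rtts; r++) {
		for (unsigned int i = 0; i < acks_per_rtt; i++) {
			if (((cnt * alpha) >> ALPHA_SHIFT) >= cwnd) {
				grown++;
				cnt = 0;
			} else {
				/* old code: cnt++; new code: cnt += pkts_acked */
				cnt += credit_acked_pkts ? pkts_per_ack : 1;
			}
		}
	}
	return (double)grown / rtts;
}

int main(void)
{
	unsigned int cwnd = 100, alpha = 256;	/* alpha/128 = 2 pkts per RTT by design */

	printf("design target           : 2.00 pkts/RTT\n");
	printf("1 pkt/ACK,  per-ACK cnt : %.2f pkts/RTT\n",
	       avg_growth(cwnd, alpha, 1, 0, 1000));
	printf("3 pkts/ACK, per-ACK cnt : %.2f pkts/RTT\n",
	       avg_growth(cwnd, alpha, 3, 0, 1000));
	printf("3 pkts/ACK, per-pkt cnt : %.2f pkts/RTT\n",
	       avg_growth(cwnd, alpha, 3, 1, 1000));
	return 0;
}

With roughly 3 packets per delayed ACK, the per-ACK counter grows cwnd at
about a third of the design rate, which is the slowdown described above.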
-rw-r--r--	net/ipv4/tcp_htcp.c	26
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index fda2f873599e..ac19252e34ce 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -29,7 +29,8 @@ struct htcp {
 	u8	modeswitch;	/* Delay modeswitch until we had at least one congestion event */
 	u8	ccount;		/* Number of RTTs since last congestion event */
 	u8	undo_ccount;
-	u16	packetcount;
+	u16	pkts_acked;
+	u32	packetcount;
 	u32	minRTT;
 	u32	maxRTT;
 	u32	snd_cwnd_cnt2;
@@ -92,6 +93,12 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked)
 	struct htcp *ca = inet_csk_ca(sk);
 	u32 now = tcp_time_stamp;
 
+	if (icsk->icsk_ca_state == TCP_CA_Open)
+		ca->pkts_acked = pkts_acked;
+
+	if (!use_bandwidth_switch)
+		return;
+
 	/* achieved throughput calculations */
 	if (icsk->icsk_ca_state != TCP_CA_Open &&
 	    icsk->icsk_ca_state != TCP_CA_Disorder) {
@@ -217,20 +224,24 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 		measure_rtt(sk);
 
 		/* keep track of number of round-trip times since last backoff event */
-		if (ca->snd_cwnd_cnt2++ > tp->snd_cwnd) {
+		if (ca->snd_cwnd_cnt2 >= tp->snd_cwnd) {
 			ca->ccount++;
-			ca->snd_cwnd_cnt2 = 0;
+			ca->snd_cwnd_cnt2 -= tp->snd_cwnd;
 			htcp_alpha_update(ca);
-		}
+		} else
+			ca->snd_cwnd_cnt2 += ca->pkts_acked;
 
 		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
 		 */
-		if ((tp->snd_cwnd_cnt++ * ca->alpha)>>7 >= tp->snd_cwnd) {
+		if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tp->snd_cwnd) {
 			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
 				tp->snd_cwnd++;
 			tp->snd_cwnd_cnt = 0;
-		}
+		} else
+			tp->snd_cwnd_cnt += ca->pkts_acked;
+
+		ca->pkts_acked = 1;
 	}
 }
 
@@ -249,6 +260,7 @@ static void htcp_init(struct sock *sk)
 	memset(ca, 0, sizeof(struct htcp));
 	ca->alpha = ALPHA_BASE;
 	ca->beta = BETA_MIN;
+	ca->pkts_acked = 1;
 }
 
 static void htcp_state(struct sock *sk, u8 new_state)
@@ -278,8 +290,6 @@ static int __init htcp_register(void)
 {
 	BUG_ON(sizeof(struct htcp) > ICSK_CA_PRIV_SIZE);
 	BUILD_BUG_ON(BETA_MIN >= BETA_MAX);
-	if (!use_bandwidth_switch)
-		htcp.pkts_acked = NULL;
 	return tcp_register_congestion_control(&htcp);
 }
 