author      Baruch Even <baruch@ev-en.org>          2006-03-21 01:23:10 -0500
committer   David S. Miller <davem@davemloft.net>   2006-03-21 01:23:10 -0500
commit      50bf3e224a2963c6dd5098f77bd7233222ebfbd2 (patch)
tree        6429bcfa043d34db73ef1bdb48243726e33f8132 /net/ipv4
parent      0bc6d90b82775113bbbe371f5d9fcffefa5fa94d (diff)
[TCP] H-TCP: Better time accounting
Instead of estimating the time since the last congestion event, count it directly.

Signed-off-by: Baruch Even <baruch@ev-en.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
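For illustration only (not part of the patch), a minimal user-space sketch of the accounting the new htcp_cong_time()/htcp_ccount() helpers perform; the field names mirror struct htcp, but the jiffies stand-in, the HZ value and the RTT numbers below are made-up assumptions:

/* Stand-alone sketch, not kernel code: models the time-based RTT count. */
#include <stdio.h>

static unsigned long jiffies;           /* stand-in for the kernel's jiffies counter */

struct htcp_sketch {
        unsigned long last_cong;        /* jiffies value at the last congestion event */
        unsigned long minRTT;           /* smallest observed RTT, in jiffies */
};

/* Same idea as htcp_cong_time(): elapsed time since the last congestion event. */
static unsigned long cong_time(const struct htcp_sketch *ca)
{
        return jiffies - ca->last_cong;
}

/* Same idea as htcp_ccount(): derive the RTT count from elapsed time instead of
 * incrementing a ccount field once per cwnd worth of ACKs. */
static unsigned long ccount(const struct htcp_sketch *ca)
{
        return cong_time(ca) / ca->minRTT;
}

int main(void)
{
        struct htcp_sketch ca = { .last_cong = 0, .minRTT = 25 }; /* ~100ms if HZ=250 (assumed) */

        jiffies = 1000;                 /* pretend 1000 ticks have passed since the event */
        printf("elapsed=%lu jiffies, ccount=%lu RTTs\n", cong_time(&ca), ccount(&ca));
        return 0;
}

In the patch below, htcp_reset(), htcp_init() and the new TCP_CA_Open case in htcp_state() all set last_cong = jiffies, so the elapsed time is counted from the most recent congestion event (or from re-entering the Open state).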
Diffstat (limited to 'net/ipv4')
-rw-r--r--      net/ipv4/tcp_htcp.c     44
1 file changed, 26 insertions(+), 18 deletions(-)
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index ac19252e34ce..1b2ff53f98ed 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -27,13 +27,12 @@ struct htcp {
        u16     alpha;          /* Fixed point arith, << 7 */
        u8      beta;           /* Fixed point arith, << 7 */
        u8      modeswitch;     /* Delay modeswitch until we had at least one congestion event */
-       u8      ccount;         /* Number of RTTs since last congestion event */
-       u8      undo_ccount;
+       u32     last_cong;      /* Time since last congestion event end */
+       u32     undo_last_cong;
        u16     pkts_acked;
        u32     packetcount;
        u32     minRTT;
        u32     maxRTT;
-       u32     snd_cwnd_cnt2;

        u32     undo_maxRTT;
        u32     undo_old_maxB;
@@ -46,21 +45,30 @@ struct htcp {
        u32     lasttime;
 };

+static inline u32 htcp_cong_time(struct htcp *ca)
+{
+       return jiffies - ca->last_cong;
+}
+
+static inline u32 htcp_ccount(struct htcp *ca)
+{
+       return htcp_cong_time(ca)/ca->minRTT;
+}
+
 static inline void htcp_reset(struct htcp *ca)
 {
-       ca->undo_ccount = ca->ccount;
+       ca->undo_last_cong = ca->last_cong;
        ca->undo_maxRTT = ca->maxRTT;
        ca->undo_old_maxB = ca->old_maxB;

-       ca->ccount = 0;
-       ca->snd_cwnd_cnt2 = 0;
+       ca->last_cong = jiffies;
 }

 static u32 htcp_cwnd_undo(struct sock *sk)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct htcp *ca = inet_csk_ca(sk);
-       ca->ccount = ca->undo_ccount;
+       ca->last_cong = ca->undo_last_cong;
        ca->maxRTT = ca->undo_maxRTT;
        ca->old_maxB = ca->undo_old_maxB;
        return max(tp->snd_cwnd, (tp->snd_ssthresh<<7)/ca->beta);
@@ -78,7 +86,7 @@ static inline void measure_rtt(struct sock *sk)
                ca->minRTT = srtt;

        /* max RTT */
-       if (icsk->icsk_ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && ca->ccount > 3) {
+       if (icsk->icsk_ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && htcp_ccount(ca) > 3) {
                if (ca->maxRTT < ca->minRTT)
                        ca->maxRTT = ca->minRTT;
                if (ca->maxRTT < srtt && srtt <= ca->maxRTT+msecs_to_jiffies(20))
@@ -113,7 +121,7 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked)
                        && now - ca->lasttime >= ca->minRTT
                        && ca->minRTT > 0) {
                __u32 cur_Bi = ca->packetcount*HZ/(now - ca->lasttime);
-               if (ca->ccount <= 3) {
+               if (htcp_ccount(ca) <= 3) {
                        /* just after backoff */
                        ca->minB = ca->maxB = ca->Bi = cur_Bi;
                } else {
@@ -158,7 +166,7 @@ static inline void htcp_alpha_update(struct htcp *ca)
 {
        u32 minRTT = ca->minRTT;
        u32 factor = 1;
-       u32 diff = ca->ccount * minRTT; /* time since last backoff */
+       u32 diff = htcp_cong_time(ca);

        if (diff > HZ) {
                diff -= HZ;
@@ -223,14 +231,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,

        measure_rtt(sk);

-       /* keep track of number of round-trip times since last backoff event */
-       if (ca->snd_cwnd_cnt2 >= tp->snd_cwnd) {
-               ca->ccount++;
-               ca->snd_cwnd_cnt2 -= tp->snd_cwnd;
-               htcp_alpha_update(ca);
-       } else
-               ca->snd_cwnd_cnt2 += ca->pkts_acked;
-
        /* In dangerous area, increase slowly.
         * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
         */
@@ -238,6 +238,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
                if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                        tp->snd_cwnd++;
                tp->snd_cwnd_cnt = 0;
+               htcp_alpha_update(ca);
        } else
                tp->snd_cwnd_cnt += ca->pkts_acked;

@@ -261,11 +262,18 @@ static void htcp_init(struct sock *sk)
        ca->alpha = ALPHA_BASE;
        ca->beta = BETA_MIN;
        ca->pkts_acked = 1;
+       ca->last_cong = jiffies;
 }

 static void htcp_state(struct sock *sk, u8 new_state)
 {
        switch (new_state) {
+       case TCP_CA_Open:
+               {
+                       struct htcp *ca = inet_csk_ca(sk);
+                       ca->last_cong = jiffies;
+               }
+               break;
        case TCP_CA_CWR:
        case TCP_CA_Recovery:
        case TCP_CA_Loss: