author    | Stephen Hemminger <shemminger@linux-foundation.org> | 2007-03-06 23:21:20 -0500
committer | David S. Miller <davem@sunset.davemloft.net>        | 2007-04-26 01:23:48 -0400
commit    | 43e683926f808cec9802466c27cee7499eda3d11 (patch)
tree      | 869b0138282a4f4391906f805fbf272e51f296f7
parent    | c5f5877c043ca471c3a607fa2c864848b19bc49a (diff)
[TCP] TCP Yeah: cleanup
Eliminate need for full 64/64 divide to compute queue.
Variable maxqueue was really a constant.
Fix indentation.
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
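The heart of the cleanup is the queue computation: a 64-by-64 div64_64() call is replaced by a widening multiply followed by do_div(), which divides a u64 by a u32 in place and is cheaper on 32-bit machines. A minimal userspace sketch of the before/after arithmetic follows (helper names and sample numbers are illustrative, not from tcp_yeah.c; plain C division stands in for the kernel's do_div() macro):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Before: one full 64-by-64 divide (div64_64() in the kernel). */
static u32 queue_old(u32 snd_cwnd, u32 rtt, u32 base_rtt)
{
        return (u32)(((u64)snd_cwnd * (rtt - base_rtt)) / (u64)rtt);
}

/* After: accumulate the product in a u64, then divide by the 32-bit
 * rtt.  In the kernel this is do_div(bw, rtt), a 64/32 divide that
 * avoids the more expensive 64/64 path on 32-bit architectures.
 */
static u32 queue_new(u32 snd_cwnd, u32 rtt, u32 base_rtt)
{
        u64 bw = snd_cwnd;

        bw *= rtt - base_rtt;
        bw /= rtt;              /* stands in for do_div(bw, rtt) */
        return (u32)bw;
}

int main(void)
{
        /* Example: cwnd = 100 packets, minRTT = 120 ms, baseRTT = 100 ms
         * => about 100 * 20 / 120 = 16 packets sitting in the queue.
         */
        printf("old=%u new=%u\n",
               queue_old(100, 120, 100), queue_new(100, 120, 100));
        return 0;
}

Both variants truncate identically; only the cost of the divide changes.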
-rw-r--r-- | net/ipv4/tcp_yeah.c | 42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 18355a2608e1..46dd1bee583a 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -74,7 +74,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked)
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
-               u32 seq_rtt, u32 in_flight, int flag)
+                               u32 seq_rtt, u32 in_flight, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct yeah *yeah = inet_csk_ca(sk);
@@ -142,8 +142,8 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
         */
 
        if (yeah->cntRTT > 2) {
-               u32 rtt;
-               u32 queue, maxqueue;
+               u32 rtt, queue;
+               u64 bw;
 
                /* We have enough RTT samples, so, using the Vegas
                 * algorithm, we determine if we should increase or
@@ -158,32 +158,36 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
                 */
                rtt = yeah->minRTT;
 
-               queue = (u32)div64_64((u64)tp->snd_cwnd * (rtt - yeah->baseRTT), rtt);
-
-               maxqueue = TCP_YEAH_ALPHA;
-
-               if (queue > maxqueue ||
-                   rtt - yeah->baseRTT > (yeah->baseRTT / TCP_YEAH_PHY)) {
-
-                       if (queue > maxqueue && tp->snd_cwnd > yeah->reno_count) {
-                               u32 reduction = min( queue / TCP_YEAH_GAMMA ,
-                                                    tp->snd_cwnd >> TCP_YEAH_EPSILON );
+               /* Compute excess number of packets above bandwidth
+                * Avoid doing full 64 bit divide.
+                */
+               bw = tp->snd_cwnd;
+               bw *= rtt - yeah->baseRTT;
+               do_div(bw, rtt);
+               queue = bw;
+
+               if (queue > TCP_YEAH_ALPHA ||
+                   rtt - yeah->baseRTT > (yeah->baseRTT / TCP_YEAH_PHY)) {
+                       if (queue > TCP_YEAH_ALPHA
+                           && tp->snd_cwnd > yeah->reno_count) {
+                               u32 reduction = min(queue / TCP_YEAH_GAMMA ,
+                                                   tp->snd_cwnd >> TCP_YEAH_EPSILON);
 
                                tp->snd_cwnd -= reduction;
 
-                               tp->snd_cwnd = max( tp->snd_cwnd, yeah->reno_count);
+                               tp->snd_cwnd = max(tp->snd_cwnd,
+                                                  yeah->reno_count);
 
                                tp->snd_ssthresh = tp->snd_cwnd;
                        }
 
                        if (yeah->reno_count <= 2)
-                               yeah->reno_count = max( tp->snd_cwnd>>1, 2U);
+                               yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
                        else
                                yeah->reno_count++;
 
-                       yeah->doing_reno_now =
-                               min_t( u32, yeah->doing_reno_now + 1 , 0xffffff);
-
+                       yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
+                                                  0xffffffU);
                } else {
                        yeah->fast_count++;
 
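Beyond the divide, the reindented hunk is Yeah's precautionary decongestion step: once the estimated queue exceeds TCP_YEAH_ALPHA packets, cwnd is cut by min(queue / TCP_YEAH_GAMMA, cwnd >> TCP_YEAH_EPSILON), but never below reno_count, the estimated Reno-fair share. A standalone sketch of that branch (the constant values and helper names below are assumptions for illustration, not quoted from the kernel headers):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define TCP_YEAH_ALPHA   80     /* assumed: max packets queued at the bottleneck */
#define TCP_YEAH_GAMMA    1     /* assumed: fraction of queue drained per RTT */
#define TCP_YEAH_EPSILON  1     /* assumed: log2 of max cwnd fraction removed */

static u32 min_u32(u32 a, u32 b) { return a < b ? a : b; }
static u32 max_u32(u32 a, u32 b) { return a > b ? a : b; }

/* Returns the new cwnd after early decongestion.  reno_count acts as a
 * floor so the sender never drops below its estimated Reno-fair share.
 */
static u32 decongest(u32 snd_cwnd, u32 queue, u32 reno_count)
{
        if (queue > TCP_YEAH_ALPHA && snd_cwnd > reno_count) {
                u32 reduction = min_u32(queue / TCP_YEAH_GAMMA,
                                        snd_cwnd >> TCP_YEAH_EPSILON);

                snd_cwnd -= reduction;
                snd_cwnd = max_u32(snd_cwnd, reno_count);
        }
        return snd_cwnd;
}

int main(void)
{
        /* queue = 120 > ALPHA, cwnd = 200, Reno floor = 90:
         * reduction = min(120/1, 200>>1) = 100, so cwnd drops to 100,
         * which already sits above the floor of 90.
         */
        printf("new cwnd = %u\n", decongest(200, 120, 90));
        return 0;
}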