author    Stephen Hemminger <shemminger@osdl.org>  2006-06-05 20:30:08 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2006-06-18 00:29:29 -0400
commit    72dc5b9225c53310c010b68a70ea97c8c8e24bdf (patch)
tree      ebd23e7cbe9846414b6fa8f8327f37043447e019 /net/ipv4/tcp_westwood.c
parent    a4ed25849532728effaa0665c92e08e029e41407 (diff)
[TCP]: Minimum congestion window consolidation.
Many of the TCP congestion control methods just use ssthresh as the
minimum congestion window on decrease. Rather than duplicating the
code, have that be the default when the min_cwnd handler in the ops
structure is not set.

This is a minor behaviour change to TCP Compound: it probably wants to
use ssthresh as the lower bound, rather than ssthresh/2, because the
latter causes undershoot on loss.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
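The caller side of this consolidation is outside the diffstat below,
which is limited to tcp_westwood.c. As a minimal sketch of the default
described above, assuming a helper along these lines in
net/ipv4/tcp_input.c (the name tcp_cwnd_min is illustrative here, not
taken from this diff):

	/* Lower bound on the congestion window on decrease: use the
	 * module's min_cwnd hook when the congestion control ops set
	 * one, otherwise fall back to the common default of ssthresh.
	 */
	static inline u32 tcp_cwnd_min(const struct sock *sk)
	{
		const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;

		return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk)
					: tcp_sk(sk)->snd_ssthresh;
	}

With such a fallback in place, modules like Westwood only need to keep
a min_cwnd handler when they want something other than ssthresh.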
Diffstat (limited to 'net/ipv4/tcp_westwood.c')
-rw-r--r--  net/ipv4/tcp_westwood.c | 18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 0c340c3756c2..29eb258b6d82 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -162,12 +162,6 @@ static inline u32 westwood_acked_count(struct sock *sk)
 	return w->cumul_ack;
 }
 
-static inline u32 westwood_bw_rttmin(const struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct westwood *w = inet_csk_ca(sk);
-	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
-}
 
 /*
  * TCP Westwood
@@ -175,9 +169,11 @@ static inline u32 westwood_bw_rttmin(const struct sock *sk)
  * in packets we use mss_cache). Rttmin is guaranteed to be >= 2
  * so avoids ever returning 0.
  */
-static u32 tcp_westwood_cwnd_min(struct sock *sk)
+static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
 {
-	return westwood_bw_rttmin(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct westwood *w = inet_csk_ca(sk);
+	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
 }
 
 static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
@@ -191,11 +187,11 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 		break;
 
 	case CA_EVENT_COMPLETE_CWR:
-		tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;
 
 	case CA_EVENT_FRTO:
-		tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;
 
 	case CA_EVENT_SLOW_ACK:
@@ -235,7 +231,7 @@ static struct tcp_congestion_ops tcp_westwood = {
 	.init		= tcp_westwood_init,
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
-	.min_cwnd	= tcp_westwood_cwnd_min,
+	.min_cwnd	= tcp_westwood_bw_rttmin,
 	.cwnd_event	= tcp_westwood_event,
 	.get_info	= tcp_westwood_info,
 	.pkts_acked	= tcp_westwood_pkts_acked,
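For reference, the value the min_cwnd hook returns here is Westwood's
estimated bandwidth-delay product expressed in packets, floored at 2
segments. A self-contained restatement of the arithmetic with purely
illustrative numbers (bw_est and rtt_min are internal Westwood
estimates; the units below are only assumed to be consistent):

	#include <stdio.h>
	#include <stdint.h>

	/* User-space restatement of the max_t() expression above: the
	 * bandwidth estimate times the minimum RTT is the bandwidth-delay
	 * product in bytes; dividing by the MSS converts it to packets,
	 * and the result is clamped to a floor of 2 segments.
	 */
	static uint32_t bw_rttmin(uint32_t bw_est, uint32_t rtt_min, uint32_t mss)
	{
		uint32_t bdp = (bw_est * rtt_min) / mss;

		return bdp > 2 ? bdp : 2;
	}

	int main(void)
	{
		/* Hypothetical estimates: 150000 bytes per tick over a
		 * minimum RTT of 10 ticks with a 1460-byte MSS gives
		 * 1500000 / 1460, i.e. a window of 1027 packets.
		 */
		printf("%u packets\n", bw_rttmin(150000, 10, 1460));
		return 0;
	}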