Diffstat (limited to 'net/ipv4/tcp_westwood.c')
 -rw-r--r--  net/ipv4/tcp_westwood.c | 80
 1 file changed, 62 insertions(+), 18 deletions(-)
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 0c340c3756c2..4247da1384bf 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -1,7 +1,24 @@
 /*
- * TCP Westwood+
+ * TCP Westwood+: end-to-end bandwidth estimation for TCP
  *
- * Angelo Dell'Aera: TCP Westwood+ support
+ * Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
+ *
+ * Support at http://c3lab.poliba.it/index.php/Westwood
+ * Main references in literature:
+ *
+ * - S. Mascolo, C. Casetti, M. Gerla et al.
+ *   "TCP Westwood: bandwidth estimation for TCP" Proc. ACM Mobicom 2001
+ *
+ * - A. Grieco, S. Mascolo
+ *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP" ACM Computer
+ *   Comm. Review, 2004
+ *
+ * - A. Dell'Aera, L. Grieco, S. Mascolo.
+ *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
+ *   A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
+ *
+ * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
+ * ssthresh after packet loss. The probing phase is as in the original Reno.
 */

 #include <linux/config.h>
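The closing sentences of the new header capture the whole design: the bandwidth estimate only changes the reaction to loss, while probing stays pure Reno. A toy round-based model in plain C, separate from the patch (the cwnd values and the bdp_pkts estimate are hypothetical stand-ins for bw_est * rtt_min / mss), shows that split:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cwnd = 10, ssthresh = 64;
	const uint32_t bdp_pkts = 32;	/* stand-in for bw_est*rtt_min/mss */

	for (unsigned int round = 0; round < 12; round++) {
		if (round == 6) {
			/* loss: Reno would halve cwnd; Westwood+ jumps to
			 * the estimated bandwidth-delay product instead */
			ssthresh = bdp_pkts > 2 ? bdp_pkts : 2;
			cwnd = ssthresh;
		} else if (cwnd < ssthresh) {
			cwnd *= 2;	/* slow start, exactly as in Reno */
		} else {
			cwnd += 1;	/* congestion avoidance, as in Reno */
		}
		printf("round %2u: cwnd %u ssthresh %u\n",
		       round, cwnd, ssthresh);
	}
	return 0;
}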
@@ -22,6 +39,8 @@ struct westwood {
 	u32    accounted;
 	u32    rtt;
 	u32    rtt_min;          /* minimum observed RTT */
+	u8     first_ack;        /* flag set on the first ack */
+	u8     reset_rtt_min;    /* reset RTT min to the next RTT sample */
 };

@@ -49,9 +68,11 @@ static void tcp_westwood_init(struct sock *sk)
 	w->bw_est = 0;
 	w->accounted = 0;
 	w->cumul_ack = 0;
+	w->reset_rtt_min = 1;
 	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
 	w->rtt_win_sx = tcp_time_stamp;
 	w->snd_una = tcp_sk(sk)->snd_una;
+	w->first_ack = 1;
 }

 /*
@@ -63,10 +84,16 @@ static inline u32 westwood_do_filter(u32 a, u32 b)
 	return (((7 * a) + b) >> 3);
 }

-static inline void westwood_filter(struct westwood *w, u32 delta)
+static void westwood_filter(struct westwood *w, u32 delta)
 {
-	w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
-	w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
+	/* If the filter is empty fill it with the first sample of bandwidth */
+	if (w->bw_ns_est == 0 && w->bw_est == 0) {
+		w->bw_ns_est = w->bk / delta;
+		w->bw_est = w->bw_ns_est;
+	} else {
+		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
+		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
+	}
 }

 /*
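westwood_do_filter() is a 7/8 exponentially weighted moving average, and the change above seeds it with the first raw sample instead of letting the estimate climb slowly from zero. A standalone sketch of the same arithmetic, with made-up sample values:

#include <stdint.h>
#include <stdio.h>

/* Mirrors westwood_do_filter(): new = (7*old + sample) / 8 */
static uint32_t do_filter(uint32_t a, uint32_t b)
{
	return ((7 * a) + b) >> 3;
}

int main(void)
{
	uint32_t est = 0;
	uint32_t samples[] = { 800, 800, 1600, 400 };	/* bytes per tick */

	for (unsigned int i = 0; i < 4; i++) {
		/* seed an empty filter, as the patched westwood_filter() does */
		est = (est == 0) ? samples[i] : do_filter(est, samples[i]);
		printf("sample %u -> est %u\n", samples[i], est);
	}
	return 0;
}

Without the seeding, even four identical samples of 800 would only drag the estimate from 0 up to about 330, which is why an early loss used to see a badly underestimated bandwidth.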
@@ -91,6 +118,15 @@ static void westwood_update_window(struct sock *sk)
 	struct westwood *w = inet_csk_ca(sk);
 	s32 delta = tcp_time_stamp - w->rtt_win_sx;

+	/* Initialize w->snd_una with the first acked sequence number in order
+	 * to fix mismatch between tp->snd_una and w->snd_una for the first
+	 * bandwidth sample
+	 */
+	if (w->first_ack) {
+		w->snd_una = tcp_sk(sk)->snd_una;
+		w->first_ack = 0;
+	}
+
 	/*
 	 * See if a RTT-window has passed.
 	 * Be careful since if RTT is less than
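The first_ack resync matters because the estimator counts delivered bytes as tp->snd_una - w->snd_una (see westwood_fast_bw() below); a w->snd_una captured at init time, before the connection's sequence space is meaningful, would all be charged to the first bandwidth sample. Illustrated with made-up sequence numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical sequence numbers */
	uint32_t tp_snd_una = 1000000;	/* first byte actually acked */
	uint32_t w_snd_una = 1;		/* stale value captured at init */
	uint32_t bk;

	/* without the fix: the whole gap looks like delivered data */
	bk = tp_snd_una - w_snd_una;
	printf("bogus first sample: bk = %u bytes\n", bk);

	/* with the fix: resync on the first ack, so counting starts at 0 */
	w_snd_una = tp_snd_una;
	bk = tp_snd_una - w_snd_una;
	printf("fixed first sample: bk = %u bytes\n", bk);
	return 0;
}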
@@ -108,6 +144,16 @@ static void westwood_update_window(struct sock *sk)
 	}
 }

+static inline void update_rtt_min(struct westwood *w)
+{
+	if (w->reset_rtt_min) {
+		w->rtt_min = w->rtt;
+		w->reset_rtt_min = 0;
+	} else
+		w->rtt_min = min(w->rtt, w->rtt_min);
+}
+
+
 /*
 * @westwood_fast_bw
 * It is called when we are in fast path. In particular it is called when
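update_rtt_min() is a plain minimum tracker except for the reset_rtt_min escape hatch: when the flag is set (at init, and after FRTO below), the old minimum is discarded and re-seeded from the very next sample rather than min()-ed against it. A short userspace trace with invented RTTs:

#include <stdint.h>
#include <stdio.h>

struct est {
	uint32_t rtt_min;
	uint8_t reset_rtt_min;
};

static void update_rtt_min(struct est *e, uint32_t rtt)
{
	if (e->reset_rtt_min) {
		e->rtt_min = rtt;	/* re-seed from this sample */
		e->reset_rtt_min = 0;
	} else if (rtt < e->rtt_min) {
		e->rtt_min = rtt;
	}
}

int main(void)
{
	struct est e = { .rtt_min = 0, .reset_rtt_min = 1 };
	uint32_t rtts[] = { 40, 35, 50 };	/* invented samples, in ticks */

	for (unsigned int i = 0; i < 3; i++) {
		update_rtt_min(&e, rtts[i]);
		printf("rtt %u -> rtt_min %u\n", rtts[i], e.rtt_min);
	}

	e.reset_rtt_min = 1;	/* the path may have changed, e.g. after FRTO */
	update_rtt_min(&e, 60);
	printf("after reset: rtt_min %u\n", e.rtt_min);
	return 0;
}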
@@ -123,7 +169,7 @@ static inline void westwood_fast_bw(struct sock *sk)

 	w->bk += tp->snd_una - w->snd_una;
 	w->snd_una = tp->snd_una;
-	w->rtt_min = min(w->rtt, w->rtt_min);
+	update_rtt_min(w);
 }

 /*
@@ -162,12 +208,6 @@ static inline u32 westwood_acked_count(struct sock *sk)
 	return w->cumul_ack;
 }

-static inline u32 westwood_bw_rttmin(const struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct westwood *w = inet_csk_ca(sk);
-	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
-}

 /*
  * TCP Westwood
@@ -175,9 +215,11 @@ static inline u32 westwood_bw_rttmin(const struct sock *sk)
  * in packets we use mss_cache). Rttmin is guaranteed to be >= 2
  * so it avoids ever returning 0.
  */
-static u32 tcp_westwood_cwnd_min(struct sock *sk)
+static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
 {
-	return westwood_bw_rttmin(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct westwood *w = inet_csk_ca(sk);
+	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
 }

 static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
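The expression is a bandwidth-delay product converted to packets: bw_est carries bytes per time unit (it comes from bk/delta), rtt_min is in the same time units, and dividing by mss_cache turns bytes into packets; max_t() supplies the floor of 2 that the comment promises. A self-contained check with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as tcp_westwood_bw_rttmin(); inputs are hypothetical:
 * bw_est in bytes per tick, rtt_min in ticks, mss in bytes. */
static uint32_t bw_rttmin(uint32_t bw_est, uint32_t rtt_min, uint32_t mss)
{
	uint32_t pkts = (bw_est * rtt_min) / mss;	/* BDP in packets */
	return pkts > 2 ? pkts : 2;			/* max_t(u32, ..., 2) */
}

int main(void)
{
	/* 14600 bytes/tick over a 4-tick rtt_min with a 1460-byte MSS:
	 * 58400 bytes = 40 packets */
	printf("cwnd = %u\n", bw_rttmin(14600, 4, 1460));

	/* a tiny estimate still returns the floor of 2 */
	printf("cwnd = %u\n", bw_rttmin(100, 1, 1460));
	return 0;
}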
@@ -191,17 +233,19 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 		break;

 	case CA_EVENT_COMPLETE_CWR:
-		tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;

 	case CA_EVENT_FRTO:
-		tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+		/* Update RTT_min when next ack arrives */
+		w->reset_rtt_min = 1;
 		break;

 	case CA_EVENT_SLOW_ACK:
 		westwood_update_window(sk);
 		w->bk += westwood_acked_count(sk);
-		w->rtt_min = min(w->rtt, w->rtt_min);
+		update_rtt_min(w);
 		break;

 	default:
@@ -235,7 +279,7 @@ static struct tcp_congestion_ops tcp_westwood = {
 	.init		= tcp_westwood_init,
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
-	.min_cwnd	= tcp_westwood_cwnd_min,
+	.min_cwnd	= tcp_westwood_bw_rttmin,
 	.cwnd_event	= tcp_westwood_event,
 	.get_info	= tcp_westwood_info,
 	.pkts_acked	= tcp_westwood_pkts_acked,
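For context, a tcp_congestion_ops table like this one is hooked into the stack by the module's init path. The registration boilerplate sits below the hunk shown above; it typically looks like the following kernel-side sketch, shown for orientation rather than as part of the patch:

/* Sketch of the usual registration path for a tcp_congestion_ops table */
static int __init tcp_westwood_register(void)
{
	/* the private state must fit the per-socket congestion-control
	 * scratch area */
	BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);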
