Diffstat (limited to 'net/ipv4/tcp_bbr.c')
-rw-r--r--  net/ipv4/tcp_bbr.c | 49
1 file changed, 38 insertions, 11 deletions
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index dbcc9352a48f..69ee877574d0 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -112,7 +112,8 @@ struct bbr {
 		cwnd_gain:10,	/* current gain for setting cwnd */
 		full_bw_cnt:3,	/* number of rounds without large bw gains */
 		cycle_idx:3,	/* current index in pacing_gain cycle array */
-		unused_b:6;
+		has_seen_rtt:1, /* have we seen an RTT sample yet? */
+		unused_b:5;
 	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
 	u32	full_bw;	/* recent bw, to estimate if pipe is full */
 };
@@ -211,6 +212,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
 	return rate >> BW_SCALE;
 }
 
+/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
+static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
+{
+	u64 rate = bw;
+
+	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+	return rate;
+}
+
+/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct bbr *bbr = inet_csk_ca(sk);
+	u64 bw;
+	u32 rtt_us;
+
+	if (tp->srtt_us) {		/* any RTT sample yet? */
+		rtt_us = max(tp->srtt_us >> 3, 1U);
+		bbr->has_seen_rtt = 1;
+	} else {			/* no RTT sample yet */
+		rtt_us = USEC_PER_MSEC;	/* use nominal default RTT */
+	}
+	bw = (u64)tp->snd_cwnd * BW_UNIT;
+	do_div(bw, rtt_us);
+	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
+}
+
 /* Pace using current bw estimate and a gain factor. In order to help drive the
  * network toward lower queues while maintaining high utilization and low
  * latency, the average pacing rate aims to be slightly (~1%) lower than the
@@ -220,12 +250,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
  */
 static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 rate = bw;
+	u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
 
-	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
-	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
-	if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
+	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
+		bbr_init_pacing_rate_from_rtt(sk);
+	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
 		sk->sk_pacing_rate = rate;
 }
 
@@ -798,7 +829,6 @@ static void bbr_init(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 bw;
 
 	bbr->prior_cwnd = 0;
 	bbr->tso_segs_goal = 0;	 /* default segs per skb until first ACK */
@@ -814,11 +844,8 @@ static void bbr_init(struct sock *sk)
 
 	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
 
-	/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
-	bw = (u64)tp->snd_cwnd * BW_UNIT;
-	do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
-	sk->sk_pacing_rate = 0;	/* force an update of sk_pacing_rate */
-	bbr_set_pacing_rate(sk, bw, bbr_high_gain);
+	bbr->has_seen_rtt = 0;
+	bbr_init_pacing_rate_from_rtt(sk);
 
 	bbr->restore_cwnd = 0;
 	bbr->round_start = 0;
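
For reference, the pacing-rate initialization added in this patch computes sk_pacing_rate as roughly high_gain * init_cwnd * MSS / RTT, in bytes per second, falling back to a nominal 1 ms RTT until the first sample arrives. The following standalone userspace sketch walks through that arithmetic; it is illustrative only, not kernel code. The fixed-point constants mirror those used by tcp_bbr.c (BW_SCALE = 24, BBR_UNIT = 256, a high gain of 2885/1000), while the cwnd, segment size and RTT inputs are made-up example values.

/* Standalone sketch of the math behind bbr_init_pacing_rate_from_rtt() and
 * bbr_bw_to_pacing_rate(); illustrative only, with made-up example inputs.
 */
#include <stdint.h>
#include <stdio.h>

#define BW_SCALE	24
#define BW_UNIT		(1ULL << BW_SCALE)	/* bw unit: packets per usec << 24 */
#define BBR_SCALE	8
#define BBR_UNIT	(1 << BBR_SCALE)	/* gains are BBR_UNIT fixed point */
#define USEC_PER_SEC	1000000ULL
#define USEC_PER_MSEC	1000U

static const int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;	/* ~2.89x */

int main(void)
{
	uint32_t snd_cwnd = 10;			/* example initial cwnd, in packets */
	uint32_t segment_bytes = 1448;		/* example payload per packet */
	uint32_t srtt_us = 20000 << 3;		/* tp->srtt_us holds 8x the RTT in usec */
	uint32_t rtt_us;

	if (srtt_us)				/* any RTT sample yet? */
		rtt_us = (srtt_us >> 3) ? (srtt_us >> 3) : 1U;
	else					/* no sample yet: nominal 1 ms default */
		rtt_us = USEC_PER_MSEC;

	/* bw in packets per usec, scaled by BW_UNIT, as in the patch */
	uint64_t bw = (uint64_t)snd_cwnd * BW_UNIT / rtt_us;

	/* convert to bytes per second and apply the startup gain */
	uint64_t rate = bw * segment_bytes * bbr_high_gain >> BBR_SCALE;
	rate = rate * USEC_PER_SEC >> BW_SCALE;

	printf("initial pacing rate: %llu bytes/sec (~%.2f Mbit/s)\n",
	       (unsigned long long)rate, rate * 8.0 / 1e6);
	return 0;
}

With these example inputs (10 segments of 1448 bytes, 20 ms smoothed RTT) the sketch prints roughly 2.09 MB/s, i.e. about 2.89 times one initial window per RTT, which is the intent of the startup gain.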