about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2017-12-08 13:27:44 -0500
committerDavid S. Miller <davem@davemloft.net>2017-12-08 13:27:44 -0500
commitb25b3e2fd9ca9b7907a53baf46dea4074ed5e062 (patch)
treea64b22bb53800a2108db54974f7a9dd649baae58
parentd4a7a8893d4cdbc89d79ac4aa704bf8d4b67b368 (diff)
parent600647d467c6d04b3954b41a6ee1795b5ae00550 (diff)
Merge branch 'tcp-bbr-sampling-fixes'
Neal Cardwell says:

====================
TCP BBR sampling fixes for loss recovery undo

This patch series has a few minor bug fixes for cases where spurious
loss recoveries can trick BBR estimators into estimating that the
available bandwidth is much lower than the true available bandwidth.
In both cases the fix here is to just reset the estimator upon loss
recovery undo.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/ipv4/tcp_bbr.c12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 69ee877574d0..8322f26e770e 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -110,7 +110,8 @@ struct bbr {
 	u32	lt_last_lost;	     /* LT intvl start: tp->lost */
 	u32	pacing_gain:10,	/* current gain for setting pacing rate */
 		cwnd_gain:10,	/* current gain for setting cwnd */
-		full_bw_cnt:3,	/* number of rounds without large bw gains */
+		full_bw_reached:1,   /* reached full bw in Startup? */
+		full_bw_cnt:2,	/* number of rounds without large bw gains */
 		cycle_idx:3,	/* current index in pacing_gain cycle array */
 		has_seen_rtt:1, /* have we seen an RTT sample yet? */
 		unused_b:5;
@@ -180,7 +181,7 @@ static bool bbr_full_bw_reached(const struct sock *sk)
 {
 	const struct bbr *bbr = inet_csk_ca(sk);
 
-	return bbr->full_bw_cnt >= bbr_full_bw_cnt;
+	return bbr->full_bw_reached;
 }
 
 /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
@@ -717,6 +718,7 @@ static void bbr_check_full_bw_reached(struct sock *sk,
 		return;
 	}
 	++bbr->full_bw_cnt;
+	bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
 }
 
 /* If pipe is probably full, drain the queue and then enter steady-state. */
@@ -850,6 +852,7 @@ static void bbr_init(struct sock *sk)
 	bbr->restore_cwnd = 0;
 	bbr->round_start = 0;
 	bbr->idle_restart = 0;
+	bbr->full_bw_reached = 0;
 	bbr->full_bw = 0;
 	bbr->full_bw_cnt = 0;
 	bbr->cycle_mstamp = 0;
@@ -871,6 +874,11 @@ static u32 bbr_sndbuf_expand(struct sock *sk)
  */
 static u32 bbr_undo_cwnd(struct sock *sk)
 {
+	struct bbr *bbr = inet_csk_ca(sk);
+
+	bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
+	bbr->full_bw_cnt = 0;
+	bbr_reset_lt_bw_sampling(sk);
 	return tcp_sk(sk)->snd_cwnd;
 }
 