Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/devinet.c            5
-rw-r--r--   net/ipv4/ip_options.c         7
-rw-r--r--   net/ipv4/ipvs/ip_vs_xmit.c    2
-rw-r--r--   net/ipv4/tcp_bic.c            2
-rw-r--r--   net/ipv4/tcp_cubic.c         48
-rw-r--r--   net/ipv4/tcp_htcp.c          10
-rw-r--r--   net/ipv4/tcp_illinois.c       8
-rw-r--r--   net/ipv4/tcp_input.c         47
-rw-r--r--   net/ipv4/tcp_lp.c             6
-rw-r--r--   net/ipv4/tcp_vegas.c          6
-rw-r--r--   net/ipv4/tcp_vegas.h          2
-rw-r--r--   net/ipv4/tcp_veno.c           6
-rw-r--r--   net/ipv4/tcp_westwood.c       7
-rw-r--r--   net/ipv4/tcp_yeah.c           4
14 files changed, 81 insertions, 79 deletions
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index abf6352f990f..5b77bdaa57dd 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1056,10 +1056,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
 	if (!in_dev) {
 		if (event == NETDEV_REGISTER) {
 			in_dev = inetdev_init(dev);
+			if (!in_dev)
+				return notifier_from_errno(-ENOMEM);
 			if (dev == &loopback_dev) {
-				if (!in_dev)
-					panic("devinet: "
-					      "Failed to create loopback\n");
 				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
 				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
 			}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 251346828cb4..2f14745a9e1f 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -513,11 +513,8 @@ void ip_options_undo(struct ip_options * opt)
 
 static struct ip_options *ip_options_get_alloc(const int optlen)
 {
-	struct ip_options *opt = kmalloc(sizeof(*opt) + ((optlen + 3) & ~3),
-					 GFP_KERNEL);
-	if (opt)
-		memset(opt, 0, sizeof(*opt));
-	return opt;
+	return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),
+		       GFP_KERNEL);
 }
 
 static int ip_options_get_finish(struct ip_options **optp,
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index 900ce29db382..666e080a74a3 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -128,7 +128,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
 #define IP_VS_XMIT(skb, rt)				\
 do {							\
 	(skb)->ipvs_property = 1;			\
-	(skb)->ip_summed = CHECKSUM_NONE;		\
+	skb_forward_csum(skb);				\
 	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, (skb), NULL,	\
 		(rt)->u.dst.dev, dst_output);		\
 } while (0)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 519de091a94d..4586211e3757 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -206,7 +206,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index d17da30d82d6..485d7ea35f75 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -246,38 +246,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 1;
 }
 
-
-/* Keep track of minimum rtt */
-static inline void measure_delay(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	struct bictcp *ca = inet_csk_ca(sk);
-	u32 delay;
-
-	/* No time stamp */
-	if (!(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) ||
-	     /* Discard delay samples right after fast recovery */
-	    (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
-		return;
-
-	delay = (tcp_time_stamp - tp->rx_opt.rcv_tsecr)<<3;
-	if (delay == 0)
-		delay = 1;
-
-	/* first time call or link delay decreases */
-	if (ca->delay_min == 0 || ca->delay_min > delay)
-		ca->delay_min = delay;
-}
-
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
 			      u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (data_acked)
-		measure_delay(sk);
-
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
@@ -334,17 +308,33 @@ static void bictcp_state(struct sock *sk, u8 new_state)
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct bictcp *ca = inet_csk_ca(sk);
+	u32 delay;
 
 	if (cnt > 0 && icsk->icsk_ca_state == TCP_CA_Open) {
-		struct bictcp *ca = inet_csk_ca(sk);
 		cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
 		ca->delayed_ack += cnt;
 	}
-}
 
+	/* Some calls are for duplicates without timetamps */
+	if (rtt_us < 0)
+		return;
+
+	/* Discard delay samples right after fast recovery */
+	if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+		return;
+
+	delay = usecs_to_jiffies(rtt_us) << 3;
+	if (delay == 0)
+		delay = 1;
+
+	/* first time call or link delay decreases */
+	if (ca->delay_min == 0 || ca->delay_min > delay)
+		ca->delay_min = delay;
+}
 
 static struct tcp_congestion_ops cubictcp = {
 	.init		= bictcp_init,
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 08a02e6045c9..b66556c0a5bd 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -76,12 +76,11 @@ static u32 htcp_cwnd_undo(struct sock *sk)
 	return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta);
 }
 
-static inline void measure_rtt(struct sock *sk)
+static inline void measure_rtt(struct sock *sk, u32 srtt)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct htcp *ca = inet_csk_ca(sk);
-	u32 srtt = tp->srtt >> 3;
 
 	/* keep track of minimum RTT seen so far, minRTT is zero at first */
 	if (ca->minRTT > srtt || !ca->minRTT)
@@ -98,7 +97,7 @@ static inline void measure_rtt(struct sock *sk)
 	}
 }
 
-static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
@@ -108,6 +107,9 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t
 	if (icsk->icsk_ca_state == TCP_CA_Open)
 		ca->pkts_acked = pkts_acked;
 
+	if (rtt > 0)
+		measure_rtt(sk, usecs_to_jiffies(rtt));
+
 	if (!use_bandwidth_switch)
 		return;
 
@@ -237,8 +239,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack,
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
 		tcp_slow_start(tp);
 	else {
-		measure_rtt(sk);
-
 		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
 		 */
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index cc5de6f69d46..64f1cbaf96e8 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -83,18 +83,16 @@ static void tcp_illinois_init(struct sock *sk)
 }
 
 /* Measure RTT for each ack. */
-static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
 {
 	struct illinois *ca = inet_csk_ca(sk);
-	u32 rtt;
 
 	ca->acked = pkts_acked;
 
-	if (ktime_equal(last, net_invalid_timestamp()))
+	/* dup ack, no rtt sample */
+	if (rtt < 0)
 		return;
 
-	rtt = ktime_to_us(net_timedelta(last));
-
 	/* ignore bogus values, this prevents wraparound in alpha math */
 	if (rtt > RTT_MAX)
 		rtt = RTT_MAX;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fec8a7a4dbaf..378ca8a086a3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1851,19 +1851,22 @@ static inline u32 tcp_cwnd_min(const struct sock *sk)
 }
 
 /* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct sock *sk)
+static void tcp_cwnd_down(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
-	tp->snd_cwnd_cnt = decr&1;
-	decr >>= 1;
+	if ((flag&FLAG_FORWARD_PROGRESS) ||
+	    (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
+		tp->snd_cwnd_cnt = decr&1;
+		decr >>= 1;
 
-	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
-		tp->snd_cwnd -= decr;
+		if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
+			tp->snd_cwnd -= decr;
 
-	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+		tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+	}
 }
 
 /* Nothing was retransmitted or returned timestamp is less
@@ -2060,7 +2063,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 		}
 		tcp_moderate_cwnd(tp);
 	} else {
-		tcp_cwnd_down(sk);
+		tcp_cwnd_down(sk, flag);
 	}
 }
 
@@ -2109,7 +2112,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
+	int is_dupack = (tp->snd_una == prior_snd_una &&
+			 (!(flag&FLAG_NOT_DUP) ||
+			  ((flag&FLAG_DATA_SACKED) &&
+			   (tp->fackets_out > tp->reordering))));
 
 	/* Some technical things:
 	 * 1. Reno does not count dupacks (sacked_out) automatically. */
@@ -2260,7 +2266,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 
 	if (is_dupack || tcp_head_timedout(sk))
 		tcp_update_scoreboard(sk);
-	tcp_cwnd_down(sk);
+	tcp_cwnd_down(sk, flag);
 	tcp_xmit_retransmit_queue(sk);
 }
 
@@ -2490,12 +2496,23 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 		tcp_ack_update_rtt(sk, acked, seq_rtt);
 		tcp_ack_packets_out(sk);
 
-		/* Is the ACK triggering packet unambiguous? */
-		if (acked & FLAG_RETRANS_DATA_ACKED)
-			last_ackt = net_invalid_timestamp();
+		if (ca_ops->pkts_acked) {
+			s32 rtt_us = -1;
+
+			/* Is the ACK triggering packet unambiguous? */
+			if (!(acked & FLAG_RETRANS_DATA_ACKED)) {
+				/* High resolution needed and available? */
+				if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
+				    !ktime_equal(last_ackt,
+						 net_invalid_timestamp()))
+					rtt_us = ktime_us_delta(ktime_get_real(),
+								last_ackt);
+				else if (seq_rtt > 0)
+					rtt_us = jiffies_to_usecs(seq_rtt);
+			}
 
-		if (ca_ops->pkts_acked)
-			ca_ops->pkts_acked(sk, pkts_acked, last_ackt);
+			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
+		}
 	}
 
 #if FASTRETRANS_DEBUG > 0
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 80e140e3ec2d..e7f5ef92cbd8 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -260,13 +260,13 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
  * newReno in increase case.
  * We work it out by following the idea from TCP-LP's paper directly
  */
-static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
+static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct lp *lp = inet_csk_ca(sk);
 
-	if (!ktime_equal(last, net_invalid_timestamp()))
-		tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last)));
+	if (rtt_us > 0)
+		tcp_lp_rtt_sample(sk, rtt_us);
 
 	/* calc inference */
 	if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 914e0307f7af..b49dedcda52d 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -112,16 +112,16 @@ EXPORT_SYMBOL_GPL(tcp_vegas_init);
  *   o min-filter RTT samples from a much longer window (forever for now)
  *     to find the propagation delay (baseRTT)
  */
-void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
 	struct vegas *vegas = inet_csk_ca(sk);
 	u32 vrtt;
 
-	if (ktime_equal(last, net_invalid_timestamp()))
+	if (rtt_us < 0)
 		return;
 
 	/* Never allow zero rtt or baseRTT */
-	vrtt = ktime_to_us(net_timedelta(last)) + 1;
+	vrtt = rtt_us + 1;
 
 	/* Filter to find propagation delay: */
 	if (vrtt < vegas->baseRTT)
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 502fa8183634..6c0eea2f8249 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -17,7 +17,7 @@ struct vegas {
 
 extern void tcp_vegas_init(struct sock *sk);
 extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
-extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last);
+extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
 extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
 extern void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
 
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 7a55ddf86032..8fb2aee0b1a4 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -69,16 +69,16 @@ static void tcp_veno_init(struct sock *sk)
 }
 
 /* Do rtt sampling needed for Veno. */
-static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
 	struct veno *veno = inet_csk_ca(sk);
 	u32 vrtt;
 
-	if (ktime_equal(last, net_invalid_timestamp()))
+	if (rtt_us < 0)
 		return;
 
 	/* Never allow zero rtt or baseRTT */
-	vrtt = ktime_to_us(net_timedelta(last)) + 1;
+	vrtt = rtt_us + 1;
 
 	/* Filter to find propagation delay: */
 	if (vrtt < veno->basertt)
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index e61e09dd513e..20151d6a6241 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -100,11 +100,12 @@ static void westwood_filter(struct westwood *w, u32 delta)
  * Called after processing group of packets.
  * but all westwood needs is the last sample of srtt.
  */
-static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
 {
 	struct westwood *w = inet_csk_ca(sk);
-	if (cnt > 0)
-		w->rtt = tcp_sk(sk)->srtt >> 3;
+
+	if (rtt > 0)
+		w->rtt = usecs_to_jiffies(rtt);
 }
 
 /*
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index c04b7c6ec702..c107fba7430e 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -58,7 +58,7 @@ static void tcp_yeah_init(struct sock *sk)
 }
 
 
-static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct yeah *yeah = inet_csk_ca(sk);
@@ -66,7 +66,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
 	if (icsk->icsk_ca_state == TCP_CA_Open)
 		yeah->pkts_acked = pkts_acked;
 
-	tcp_vegas_pkts_acked(sk, pkts_acked, last);
+	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,