Diffstat (limited to 'net/ipv4/tcp_input.c')
 net/ipv4/tcp_input.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
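For reference (not part of this patch): each skb->skb_mstamp read below is replaced by the tcp_skb_timestamp_us() helper, which is assumed to be added to include/net/tcp.h elsewhere in the same series. A minimal sketch of what such an accessor could look like at this point, with skb_mstamp still stored in microseconds:

/* Hypothetical sketch, not part of this diff: return the skb's departure
 * timestamp in microseconds through one accessor, so callers in
 * tcp_input.c no longer depend on how the stamp is stored.
 */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return skb->skb_mstamp;
}

Funneling these reads through a single helper means a later change to the timestamp's storage or unit only has to touch the accessor, not the call sites below.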
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d9034073138c..d703a0b3b6a2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1305,7 +1305,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
 	 */
 	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
 			start_seq, end_seq, dup_sack, pcount,
-			skb->skb_mstamp);
+			tcp_skb_timestamp_us(skb));
 	tcp_rate_skb_delivered(sk, skb, state->rate);
 
 	if (skb == tp->lost_skb_hint)
@@ -1580,7 +1580,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 						TCP_SKB_CB(skb)->end_seq,
 						dup_sack,
 						tcp_skb_pcount(skb),
-						skb->skb_mstamp);
+						tcp_skb_timestamp_us(skb));
 			tcp_rate_skb_delivered(sk, skb, state->rate);
 			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
 				list_del_init(&skb->tcp_tsorted_anchor);
@@ -3103,7 +3103,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 				tp->retrans_out -= acked_pcount;
 			flag |= FLAG_RETRANS_DATA_ACKED;
 		} else if (!(sacked & TCPCB_SACKED_ACKED)) {
-			last_ackt = skb->skb_mstamp;
+			last_ackt = tcp_skb_timestamp_us(skb);
 			WARN_ON_ONCE(last_ackt == 0);
 			if (!first_ackt)
 				first_ackt = last_ackt;
@@ -3121,7 +3121,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			tp->delivered += acked_pcount;
 			if (!tcp_skb_spurious_retrans(tp, skb))
 				tcp_rack_advance(tp, sacked, scb->end_seq,
-						 skb->skb_mstamp);
+						 tcp_skb_timestamp_us(skb));
 		}
 		if (sacked & TCPCB_LOST)
 			tp->lost_out -= acked_pcount;
@@ -3215,7 +3215,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
 		}
 	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
-		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
+		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
+						    tcp_skb_timestamp_us(skb))) {
 		/* Do not re-arm RTO if the sack RTT is measured from data sent
 		 * after when the head was last (re)transmitted. Otherwise the
 		 * timeout may continue to extend in loss recovery.