aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorNeal Cardwell <ncardwell@google.com>2012-02-12 13:37:10 -0500
committerDavid S. Miller <davem@davemloft.net>2012-02-13 01:00:22 -0500
commitdaef52bab1fd26e24e8e9578f8fb33ba1d0cb412 (patch)
tree93c2cb04a8861280aefcb94961e81e4d5a580095 /net
parentcc9a672ee522d4805495b98680f4a3db5d0a0af9 (diff)
tcp: fix range tcp_shifted_skb() passes to tcp_sacktag_one()
Fix the newly-SACKed range to be the range of newly-shifted bytes. Previously - since 832d11c5cd076abc0aa1eaf7be96c81d1a59ce41 - tcp_shifted_skb() incorrectly called tcp_sacktag_one() with the start and end sequence numbers of the skb it passes in set to the range just beyond the range that is newly-SACKed. This commit also removes a special-case adjustment to lost_cnt_hint in tcp_shifted_skb() since the pre-existing adjustment of lost_cnt_hint in tcp_sacktag_one() now properly handles this case now that the correct start sequence number is passed in. Signed-off-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/tcp_input.c19
1 file changed, 10 insertions, 9 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4e8a81fda65c..8116d06e042c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1388,6 +1388,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
 	return sacked;
 }
 
+/* Shift newly-SACKed bytes from this skb to the immediately previous
+ * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
+ */
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			   struct tcp_sacktag_state *state,
 			   unsigned int pcount, int shifted, int mss,
@@ -1395,12 +1398,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
+	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
+	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */
 
 	BUG_ON(!pcount);
 
-	if (skb == tp->lost_skb_hint)
-		tp->lost_cnt_hint += pcount;
-
 	TCP_SKB_CB(prev)->end_seq += shifted;
 	TCP_SKB_CB(skb)->seq += shifted;
 
@@ -1424,12 +1426,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 		skb_shinfo(skb)->gso_type = 0;
 	}
 
-	/* We discard results */
-	tcp_sacktag_one(sk, state,
-			TCP_SKB_CB(skb)->sacked,
-			TCP_SKB_CB(skb)->seq,
-			TCP_SKB_CB(skb)->end_seq,
-			dup_sack, pcount);
+	/* Adjust counters and hints for the newly sacked sequence range but
+	 * discard the return value since prev is already marked.
+	 */
+	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+			start_seq, end_seq, dup_sack, pcount);
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);