about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorIlpo Järvinen <ilpo.jarvinen@helsinki.fi>2007-10-11 20:36:13 -0400
committerDavid S. Miller <davem@davemloft.net>2007-10-11 20:36:13 -0400
commitb08d6cb22c777c8c91c16d8e3b8aafc93c98cbd9 (patch)
tree139b1f2636c42698bd7b0f0ccd61f0e1b8a826ab /net/ipv4/tcp_input.c
parentf785a8e28b9d103c7473655743b6ac1bc3cd3a58 (diff)
[TCP]: Limit processing lost_retrans loop to work-to-do cases
This addition of lost_retrans_low to tcp_sock might be unnecessary; it's not clear how often the lost_retrans worker is executed when there wasn't work to do.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- net/ipv4/tcp_input.c | 14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d5e0fcc22a3b..0a42e9340346 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1112,7 +1112,8 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
1112 * 1112 *
1113 * Search retransmitted skbs from write_queue that were sent when snd_nxt was 1113 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
1114 * less than what is now known to be received by the other end (derived from 1114 * less than what is now known to be received by the other end (derived from
1115 * SACK blocks by the caller). 1115 * SACK blocks by the caller). Also calculate the lowest snd_nxt among the
1116 * remaining retransmitted skbs to avoid some costly processing per ACKs.
1116 */ 1117 */
1117static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto) 1118static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
1118{ 1119{
@@ -1120,6 +1121,7 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
1120 struct sk_buff *skb; 1121 struct sk_buff *skb;
1121 int flag = 0; 1122 int flag = 0;
1122 int cnt = 0; 1123 int cnt = 0;
1124 u32 new_low_seq = 0;
1123 1125
1124 tcp_for_write_queue(skb, sk) { 1126 tcp_for_write_queue(skb, sk) {
1125 u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; 1127 u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
@@ -1151,9 +1153,15 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
1151 NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT); 1153 NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
1152 } 1154 }
1153 } else { 1155 } else {
1156 if (!new_low_seq || before(ack_seq, new_low_seq))
1157 new_low_seq = ack_seq;
1154 cnt += tcp_skb_pcount(skb); 1158 cnt += tcp_skb_pcount(skb);
1155 } 1159 }
1156 } 1160 }
1161
1162 if (tp->retrans_out)
1163 tp->lost_retrans_low = new_low_seq;
1164
1157 return flag; 1165 return flag;
1158} 1166}
1159 1167
@@ -1481,8 +1489,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1481 } 1489 }
1482 } 1490 }
1483 1491
1484 if (tp->retrans_out && highest_sack_end_seq && 1492 if (tp->retrans_out &&
1485 after(highest_sack_end_seq, tp->high_seq) && 1493 after(highest_sack_end_seq, tp->lost_retrans_low) &&
1486 icsk->icsk_ca_state == TCP_CA_Recovery) 1494 icsk->icsk_ca_state == TCP_CA_Recovery)
1487 flag |= tcp_mark_lost_retrans(sk, highest_sack_end_seq); 1495 flag |= tcp_mark_lost_retrans(sk, highest_sack_end_seq);
1488 1496