path: root/net/ipv4/tcp_output.c
author     John Heffner <jheffner@psc.edu>        2006-10-18 23:36:48 -0400
committer  David S. Miller <davem@davemloft.net>  2006-10-18 23:36:48 -0400
commit     ae8064ac32d07f609114d73928cdef803be87134 (patch)
tree       a783594abc5ca2f795992664684ab46581625fac /net/ipv4/tcp_output.c
parent     b52f070c9c3c09ed3b7f699280193aae7e25d816 (diff)
[TCP]: Bound TSO defer time
This patch limits the amount of time you will defer sending a TSO segment
to less than two clock ticks, or the time between two acks, whichever is
longer.

On slow links, deferring causes significant bursts. See attached plots,
which show RTT through a 1 Mbps link with a 100 ms RTT and ~100 ms queue
for (a) non-TSO, (b) current TSO, and (c) patched TSO. This burstiness
causes significant jitter, tends to overflow queues early (bad for short
queues), and makes delay-based congestion control more difficult.

Deferring by a couple clock ticks I believe will have a relatively small
impact on performance.

Signed-off-by: John Heffner <jheffner@psc.edu>
Signed-off-by: David S. Miller <davem@davemloft.net>
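[Editor's note] The timestamp trick the patch relies on is worth spelling out:
tp->tso_deferred packs both "a deferral is pending" and "when it started" into
a single u32 by setting bit 0 as a flag and keeping the low 31 bits of jiffies
in bits 1..31, so the stored value is never zero even when jiffies is. Below is
a minimal user-space sketch of that encoding, not kernel code: jiffies_now,
start_defer() and defer_expired() are hypothetical stand-ins for jiffies and
the open-coded checks in tcp_tso_should_defer(), and the expiry test assumes
the changelog's intended reading (compare timestamps only while a deferral is
pending).

#include <stdio.h>
#include <stdint.h>

static uint32_t jiffies_now;   /* stand-in for the kernel's jiffies counter */
static uint32_t tso_deferred;  /* 0 means no deferral is pending */

/* Record the start of a deferral: flag bit 0 plus a 31-bit timestamp,
 * mirroring "tp->tso_deferred = 1 | (jiffies<<1)" in the patch. */
static void start_defer(void)
{
        tso_deferred = 1 | (jiffies_now << 1);
}

/* True once a pending deferral is more than one tick old, i.e. the
 * two-tick bound from the changelog has been reached.  ((x<<1)>>1)
 * clears the top bit so both sides compare as 31-bit values. */
static int defer_expired(void)
{
        return tso_deferred &&
               ((jiffies_now << 1) >> 1) - (tso_deferred >> 1) > 1;
}

int main(void)
{
        jiffies_now = 1000;
        start_defer();
        printf("pending=%d expired=%d\n", !!tso_deferred, defer_expired());
        jiffies_now += 2;               /* two ticks later: bound reached */
        printf("pending=%d expired=%d\n", !!tso_deferred, defer_expired());
        tso_deferred = 0;               /* sending clears the state */
        return 0;
}

The flag bit matters because tso_deferred doubles as the pending/not-pending
indicator: clearing it to 0 on every send_now path, as the patch does, cheaply
distinguishes "not deferring" from a deferral that happened to start at
jiffies == 0.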
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c | 20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f22536e32cb1..ca406157724c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1096,10 +1096,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
         u32 send_win, cong_win, limit, in_flight;
 
         if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
-                return 0;
+                goto send_now;
 
         if (icsk->icsk_ca_state != TCP_CA_Open)
-                return 0;
+                goto send_now;
+
+        /* Defer for less than two clock ticks. */
+        if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
+                goto send_now;
 
         in_flight = tcp_packets_in_flight(tp);
 
@@ -1115,7 +1119,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
 
         /* If a full-sized TSO skb can be sent, do it. */
         if (limit >= 65536)
-                return 0;
+                goto send_now;
 
         if (sysctl_tcp_tso_win_divisor) {
                 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -1125,7 +1129,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
          */
                 chunk /= sysctl_tcp_tso_win_divisor;
                 if (limit >= chunk)
-                        return 0;
+                        goto send_now;
         } else {
                 /* Different approach, try not to defer past a single
                  * ACK.  Receiver should ACK every other full sized
@@ -1133,11 +1137,17 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
          * then send now.
          */
         if (limit > tcp_max_burst(tp) * tp->mss_cache)
-                return 0;
+                goto send_now;
         }
 
         /* Ok, it looks like it is advisable to defer. */
+        tp->tso_deferred = 1 | (jiffies<<1);
+
         return 1;
+
+send_now:
+        tp->tso_deferred = 0;
+        return 0;
 }
 
 /* Create a new MTU probe if we are ready.